diff --git a/go.mod b/go.mod index 990f77f..f03b59a 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,11 @@ go 1.14 require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71 github.com/stretchr/testify v1.1.4-0.20160524234229-8d64eb7173c7 + github.com/taskcluster/pulse-go v1.0.0 + github.com/taskcluster/taskcluster/clients/client-go/v23 v23.0.0 github.com/urfave/cli v1.17.1-0.20160608151511-fa949b48f384 go.mozilla.org/mozlog v0.0.0-20160610165107-cd74695caf44 + launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect ) diff --git a/go.sum b/go.sum index 83811ba..30a92d4 100644 --- a/go.sum +++ b/go.sum @@ -1,10 +1,54 @@ +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71 h1:2MR0pKUzlP3SGgj5NYJe/zRYDwOu9ku6YHy+Iw7l5DM= +github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/stretchr/testify v1.1.4-0.20160524234229-8d64eb7173c7 h1:5KNW+VDCZxdFJasHPW1AehzhTIm69pHQq2psutjjXRk= github.com/stretchr/testify v1.1.4-0.20160524234229-8d64eb7173c7/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/taskcluster/httpbackoff/v3 v3.0.0 h1:Zh2BCW2iA3fzBBuZo2E4MvwyPSB6aimyI4EreeK3TRM= +github.com/taskcluster/httpbackoff/v3 v3.0.0/go.mod h1:99ubellEC0fmRj7wnGkPftT2xfCY7NmbjT3gzn2ZPUM= +github.com/taskcluster/jsonschema2go v1.0.0 h1:ZEDj2NKh8Sceq36zyLhSV6ann/aNXKZIe9cAXq7CDdk= +github.com/taskcluster/jsonschema2go v1.0.0/go.mod h1:jhsT3XPj3iLNRx0efJVfFzZBZgxeYE7IHfZAai8wuKQ= +github.com/taskcluster/pulse-go v1.0.0 h1:ys4ZUNp5TYiV5LSMxge4YF/AtuBUNH9StAA/bkno+r0= +github.com/taskcluster/pulse-go v1.0.0/go.mod h1:uuaqnRQj9XqouabCEKjnrlJiC6UT9Gurx2oSe6s+irM= +github.com/taskcluster/slugid-go v1.1.0 h1:SWsUplliyamdYzOKVM4+lDohZKuL63fKreGkvIKJ9aI= +github.com/taskcluster/slugid-go v1.1.0/go.mod h1:5sOAcPHjqso1UkKxSl77CkKgOwha0D9X0msBKBj0AOg= +github.com/taskcluster/taskcluster-base-go v1.0.0 h1:Jh2R/J7+a23LjtYEHQtkFV04QBxx6EVX8E0PrzUqJo4= +github.com/taskcluster/taskcluster-base-go v1.0.0/go.mod h1:ByyzyqqufsfZTrAHUw+0Grp8FwZAizZOKzVE1IpDXxQ= +github.com/taskcluster/taskcluster-lib-urls v12.0.0+incompatible h1:57WLzh7B04y6ahTOJ8wjvdkbwYqnyJkwLXQ1Tu4E/DU= +github.com/taskcluster/taskcluster-lib-urls v12.0.0+incompatible/go.mod h1:ALqTgi15AmJGEGubRKM0ydlLAFatlQPrQrmal9YZpQs= +github.com/taskcluster/taskcluster/clients/client-go/v23 v23.0.0 h1:QQDQUy8zHLdfMF+EQEkA+NRGfjdykhHxNcPMakr+sm8= +github.com/taskcluster/taskcluster/clients/client-go/v23 v23.0.0/go.mod 
h1:5NZbdHaaF2/yjkHtlcEKm4AXs3EH3M5Kih0wAEGRlMs= +github.com/tent/hawk-go v0.0.0-20161026210932-d341ea318957 h1:6Fre/uvwovW5YY4nfHZk66cAg9HjT9YdFSAJHUUgOyQ= +github.com/tent/hawk-go v0.0.0-20161026210932-d341ea318957/go.mod h1:dch7ywQEefE1ibFqBG1erFibrdUIwovcwQjksYuHuP4= github.com/urfave/cli v1.17.1-0.20160608151511-fa949b48f384 h1:nYZrQfefkHIWNPwwIToQOnTeZ5nQt4nUHiS4wHQBWSA= github.com/urfave/cli v1.17.1-0.20160608151511-fa949b48f384/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= go.mozilla.org/mozlog v0.0.0-20160610165107-cd74695caf44 h1:ZwI2d4V8jjame21x82EXAGuKKStymcxBsNt0hkN4/5E= go.mozilla.org/mozlog v0.0.0-20160610165107-cd74695caf44/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= diff --git a/main.go b/main.go index f639bcd..8b61a18 100644 --- a/main.go +++ b/main.go @@ -9,6 +9,8 @@ import ( "go.mozilla.org/mozlog" "github.com/urfave/cli" + + "github.com/taskcluster/pulse-go/pulse" ) func init() { @@ -47,6 +49,34 @@ func main() { Usage: "Password for authing against jenkins", EnvVar: "JENKINS_PASSWORD", }, + cli.StringFlag{ + Name: "cloudops-pulse-prefix", + Usage: "Pulse route to listen to.", + Value: "cloudops.v1.deploy", + EnvVar: "CLOUDOPS_PULSE_PREFIX", + }, + cli.StringFlag{ + Name: "pulse-queue", + Usage: "Pulse queue to listen to.", + Value: "deploy-proxy", + EnvVar: "PULSE_QUEUE", + }, + cli.StringFlag{ + Name: "pulse-username", + Usage: "Username for authing against pulse", + EnvVar: "PULSE_USERNAME", + }, + cli.StringFlag{ + Name: "pulse-password", + Usage: "Password for authing against pulse", + EnvVar: "PULSE_PASSWORD", + }, + cli.StringFlag{ + Name: "pulse-host", + Usage: "Host for connecting to pulse", + Value: "", + EnvVar: "PULSE_HOST", + }, } app.Action = func(c *cli.Context) error { @@ -74,6 +104,21 @@ func main() { w.Write([]byte("OK")) }) + pulse := pulse.NewConnection( + c.String("pulse-username"), + c.String("pulse-password"), + c.String("pulse-host"), + ) + taskclusterPulseHandler := proxyservice.NewTaskclusterPulseHandler( + jenkins, + &pulse, + c.String("cloudops-pulse-prefix"), + ) + + if err := taskclusterPulseHandler.Consume(); err != nil { + return cli.NewExitError(fmt.Sprintf("Could not listen to pulse: %v", err), 1) + } + server := &http.Server{ Addr: c.String("addr"), Handler: mux, diff --git a/proxyservice/jenkins.go b/proxyservice/jenkins.go index 018c985..94a71ed 100644 --- a/proxyservice/jenkins.go +++ b/proxyservice/jenkins.go @@ 
-116,3 +116,25 @@ func (j *Jenkins) TriggerDockerhubJob(data *DockerHubWebhookData) error { params.Set("RawJSON", string(rawJSON)) return j.TriggerJob(path, params) } + +// TriggerTaskclusterJob triggers a Jenkins job for a completed taskcluster task, +// identified by its slugid-format taskID and the pulse route it was published on. +func (j *Jenkins) TriggerTaskclusterJob(taskID string, route string, data *TaskCompletedMessage) error { + if !regexp.MustCompile(`^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$`).MatchString(taskID) { + return fmt.Errorf("Invalid taskID: %s", taskID) + } + // FIXME: This should probably be split on . + if !regexp.MustCompile(`^[a-zA-Z0-9_\-]{2,255}$`).MatchString(route) { + return fmt.Errorf("Invalid route: %s", route) + } + + rawJSON, err := json.Marshal(data) + if err != nil { + return fmt.Errorf("Error marshaling data: %v", err) + } + path := path.Join("/job/taskcluster/job", + route, "job", route) + params := url.Values{} + params.Set("TASK_ID", taskID) + params.Set("RawJSON", string(rawJSON)) + return j.TriggerJob(path, params) +} diff --git a/proxyservice/taskcluster.go b/proxyservice/taskcluster.go new file mode 100644 index 0000000..186280f --- /dev/null +++ b/proxyservice/taskcluster.go @@ -0,0 +1,70 @@ +package proxyservice + +import ( + "fmt" + "log" + "strings" + + "github.com/streadway/amqp" + "github.com/taskcluster/pulse-go/pulse" + "github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents" +) + +type TaskCompletedMessage = tcqueueevents.TaskCompletedMessage + +type routeTaskCompleted struct { + Route string +} + +func (binding routeTaskCompleted) RoutingKey() string { + return fmt.Sprintf("route.%s", binding.Route) +} + +func (binding routeTaskCompleted) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-completed" +} + +func (binding routeTaskCompleted) NewPayloadObject() interface{} { + return new(tcqueueevents.TaskCompletedMessage) +} + +type TaskclusterPulseHandler struct { + Jenkins *Jenkins + Pulse *pulse.Connection + PulseRoutePrefix string +} + +func 
NewTaskclusterPulseHandler(jenkins *Jenkins, pulse *pulse.Connection, routePrefix string) *TaskclusterPulseHandler { + return &TaskclusterPulseHandler{ + Jenkins: jenkins, + Pulse: pulse, + PulseRoutePrefix: routePrefix, + } +} + +func (handler *TaskclusterPulseHandler) handleMessage(message interface{}, delivery amqp.Delivery) { + routingKeyPrefix := "route." + handler.PulseRoutePrefix + "." + switch t := message.(type) { + case *tcqueueevents.TaskCompletedMessage: + if strings.HasPrefix(delivery.RoutingKey, routingKeyPrefix) { + route := strings.TrimPrefix(delivery.RoutingKey, routingKeyPrefix) + if err := handler.Jenkins.TriggerTaskclusterJob(t.Status.TaskID, route, t); err != nil { + log.Printf("Error triggering taskcluster job: %s", err) + } + } + } + delivery.Ack(false) // acknowledge message *after* processing + +} + +func (handler *TaskclusterPulseHandler) Consume() error { + bindingRoute := handler.PulseRoutePrefix + ".#" + _, err := handler.Pulse.Consume( + "", // queue name + handler.handleMessage, + 1, // prefetch 1 message at a time + false, // don't autoacknowledge messages + routeTaskCompleted{Route: bindingRoute}, + ) + return err +} diff --git a/vendor/github.com/cenkalti/backoff/v3/.gitignore b/vendor/github.com/cenkalti/backoff/v3/.gitignore new file mode 100644 index 0000000..0026861 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/cenkalti/backoff/v3/.travis.yml b/vendor/github.com/cenkalti/backoff/v3/.travis.yml new file mode 100644 index 0000000..47a6a46 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.7 + - 1.x + - tip 
+before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v3/LICENSE b/vendor/github.com/cenkalti/backoff/v3/LICENSE new file mode 100644 index 0000000..89b8179 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v3/README.md b/vendor/github.com/cenkalti/backoff/v3/README.md new file mode 100644 index 0000000..55ebc98 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/README.md @@ -0,0 +1,30 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. 
+ +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +See https://godoc.org/github.com/cenkalti/backoff#pkg-examples + +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. +* If proposed change is not a common use case, I will probably not accept it. + +[godoc]: https://godoc.org/github.com/cenkalti/backoff +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png +[travis]: https://travis-ci.org/cenkalti/backoff +[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master +[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master +[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/vendor/github.com/cenkalti/backoff/v3/backoff.go b/vendor/github.com/cenkalti/backoff/v3/backoff.go new file mode 100644 index 0000000..3676ee4 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. 
+// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff. Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. 
+type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v3/context.go b/vendor/github.com/cenkalti/backoff/v3/context.go new file mode 100644 index 0000000..7706faa --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/context.go @@ -0,0 +1,63 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. +type BackOffContext interface { + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func ensureContext(b BackOff) BackOffContext { + if cb, ok := b.(BackOffContext); ok { + return cb + } + return WithContext(b, context.Background()) +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + } + next := b.BackOff.NextBackOff() + if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { + return Stop + } + return next +} diff --git a/vendor/github.com/cenkalti/backoff/v3/exponential.go b/vendor/github.com/cenkalti/backoff/v3/exponential.go new file mode 100644 index 0000000..a031a65 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/exponential.go @@ -0,0 +1,153 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* 
+ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff stops. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. 
+type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Clock: SystemClock, + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { + return Stop + } + defer b.incrementCurrentInterval() + return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. 
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v3/go.mod b/vendor/github.com/cenkalti/backoff/v3/go.mod new file mode 100644 index 0000000..479e62a --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/go.mod @@ -0,0 +1,3 @@ +module github.com/cenkalti/backoff/v3 + +go 1.12 diff --git a/vendor/github.com/cenkalti/backoff/v3/retry.go b/vendor/github.com/cenkalti/backoff/v3/retry.go new file mode 100644 index 0000000..e936a50 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/retry.go @@ -0,0 +1,82 @@ +package backoff + +import "time" + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. 
+type Operation func() error + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. +// o is guaranteed to be run at least once. +// +// If o returns a *PermanentError, the operation is not retried, and the +// wrapped error is returned. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + var err error + var next time.Duration + var t *time.Timer + + cb := ensureContext(b) + + b.Reset() + for { + if err = operation(); err == nil { + return nil + } + + if permanent, ok := err.(*PermanentError); ok { + return permanent.Err + } + + if next = cb.NextBackOff(); next == Stop { + return err + } + + if notify != nil { + notify(err, next) + } + + if t == nil { + t = time.NewTimer(next) + defer t.Stop() + } else { + t.Reset(next) + } + + select { + case <-cb.Context().Done(): + return err + case <-t.C: + } + } +} + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +// Permanent wraps the given err in a *PermanentError. 
+func Permanent(err error) *PermanentError { + return &PermanentError{ + Err: err, + } +} diff --git a/vendor/github.com/cenkalti/backoff/v3/ticker.go b/vendor/github.com/cenkalti/backoff/v3/ticker.go new file mode 100644 index 0000000..e41084b --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/ticker.go @@ -0,0 +1,82 @@ +package backoff + +import ( + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOffContext + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: ensureContext(b), + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. 
+ return + case <-t.b.Context().Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + return time.After(next) +} diff --git a/vendor/github.com/cenkalti/backoff/v3/tries.go b/vendor/github.com/cenkalti/backoff/v3/tries.go new file mode 100644 index 0000000..cfeefd9 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/tries.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. +*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/fatih/camelcase/.travis.yml b/vendor/github.com/fatih/camelcase/.travis.yml new file mode 100644 index 0000000..3489e38 --- /dev/null +++ b/vendor/github.com/fatih/camelcase/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: 1.x + diff --git a/vendor/github.com/fatih/camelcase/LICENSE.md b/vendor/github.com/fatih/camelcase/LICENSE.md new file mode 100644 index 0000000..aa4a536 --- /dev/null +++ b/vendor/github.com/fatih/camelcase/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2015 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including 
without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/camelcase/README.md b/vendor/github.com/fatih/camelcase/README.md new file mode 100644 index 0000000..105a6ae --- /dev/null +++ b/vendor/github.com/fatih/camelcase/README.md @@ -0,0 +1,58 @@ +# CamelCase [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/camelcase) [![Build Status](http://img.shields.io/travis/fatih/camelcase.svg?style=flat-square)](https://travis-ci.org/fatih/camelcase) + +CamelCase is a Golang (Go) package to split the words of a camelcase type +string into a slice of words. It can be used to convert a camelcase word (lower +or upper case) into any type of word. + +## Splitting rules: + +1. If string is not valid UTF-8, return it without splitting as + single item array. +2. Assign all unicode characters into one of 4 sets: lower case + letters, upper case letters, numbers, and all other characters. +3. Iterate through characters of string, introducing splits + between adjacent characters that belong to different sets. +4. 
Iterate through array of split strings, and if a given string + is upper case: + * if subsequent string is lower case: + * move last character of upper case string to beginning of + lower case string + +## Install + +```bash +go get github.com/fatih/camelcase +``` + +## Usage and examples + +```go +splitted := camelcase.Split("GolangPackage") + +fmt.Println(splitted[0], splitted[1]) // prints: "Golang", "Package" +``` + +Both lower camel case and upper camel case are supported. For more info please +check: [http://en.wikipedia.org/wiki/CamelCase](http://en.wikipedia.org/wiki/CamelCase) + +Below are some example cases: + +``` +"" => [] +"lowercase" => ["lowercase"] +"Class" => ["Class"] +"MyClass" => ["My", "Class"] +"MyC" => ["My", "C"] +"HTML" => ["HTML"] +"PDFLoader" => ["PDF", "Loader"] +"AString" => ["A", "String"] +"SimpleXMLParser" => ["Simple", "XML", "Parser"] +"vimRPCPlugin" => ["vim", "RPC", "Plugin"] +"GL11Version" => ["GL", "11", "Version"] +"99Bottles" => ["99", "Bottles"] +"May5" => ["May", "5"] +"BFG9000" => ["BFG", "9000"] +"BöseÜberraschung" => ["Böse", "Überraschung"] +"Two spaces" => ["Two", " ", "spaces"] +"BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] +``` diff --git a/vendor/github.com/fatih/camelcase/camelcase.go b/vendor/github.com/fatih/camelcase/camelcase.go new file mode 100644 index 0000000..02160c9 --- /dev/null +++ b/vendor/github.com/fatih/camelcase/camelcase.go @@ -0,0 +1,90 @@ +// Package camelcase is a micro package to split the words of a camelcase type +// string into a slice of words. +package camelcase + +import ( + "unicode" + "unicode/utf8" +) + +// Split splits the camelcase word and returns a list of words. It also +// supports digits. Both lower camel case and upper camel case are supported. 
+// For more info please check: http://en.wikipedia.org/wiki/CamelCase +// +// Examples +// +// "" => [""] +// "lowercase" => ["lowercase"] +// "Class" => ["Class"] +// "MyClass" => ["My", "Class"] +// "MyC" => ["My", "C"] +// "HTML" => ["HTML"] +// "PDFLoader" => ["PDF", "Loader"] +// "AString" => ["A", "String"] +// "SimpleXMLParser" => ["Simple", "XML", "Parser"] +// "vimRPCPlugin" => ["vim", "RPC", "Plugin"] +// "GL11Version" => ["GL", "11", "Version"] +// "99Bottles" => ["99", "Bottles"] +// "May5" => ["May", "5"] +// "BFG9000" => ["BFG", "9000"] +// "BöseÜberraschung" => ["Böse", "Überraschung"] +// "Two spaces" => ["Two", " ", "spaces"] +// "BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] +// +// Splitting rules +// +// 1) If string is not valid UTF-8, return it without splitting as +// single item array. +// 2) Assign all unicode characters into one of 4 sets: lower case +// letters, upper case letters, numbers, and all other characters. +// 3) Iterate through characters of string, introducing splits +// between adjacent characters that belong to different sets. +// 4) Iterate through array of split strings, and if a given string +// is upper case: +// if subsequent string is lower case: +// move last character of upper case string to beginning of +// lower case string +func Split(src string) (entries []string) { + // don't split invalid utf8 + if !utf8.ValidString(src) { + return []string{src} + } + entries = []string{} + var runes [][]rune + lastClass := 0 + class := 0 + // split into fields based on class of unicode character + for _, r := range src { + switch true { + case unicode.IsLower(r): + class = 1 + case unicode.IsUpper(r): + class = 2 + case unicode.IsDigit(r): + class = 3 + default: + class = 4 + } + if class == lastClass { + runes[len(runes)-1] = append(runes[len(runes)-1], r) + } else { + runes = append(runes, []rune{r}) + } + lastClass = class + } + // handle upper case -> lower case sequences, e.g. 
+ // "PDFL", "oader" -> "PDF", "Loader" + for i := 0; i < len(runes)-1; i++ { + if unicode.IsUpper(runes[i][0]) && unicode.IsLower(runes[i+1][0]) { + runes[i+1] = append([]rune{runes[i][len(runes[i])-1]}, runes[i+1]...) + runes[i] = runes[i][:len(runes[i])-1] + } + } + // construct []string from results + for _, s := range runes { + if len(s) > 0 { + entries = append(entries, string(s)) + } + } + return +} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 0000000..d8156a6 --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 0000000..04fdf09 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 0000000..b4bb97f --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 0000000..5dc6826 --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 0000000..9d92c11 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. 
+ +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 0000000..fa820b9 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. 
+func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 0000000..5b8a4b9 --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. 
+package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 0000000..b174616 --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) + h.Write(data) + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. 
It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 0000000..7f9e0c6 --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,37 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err == nil { + *uuid = id + } + return err +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 0000000..3e4e90d --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,89 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. 
The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. 
+func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 0000000..24b78ed --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This remvoves the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 0000000..0cbbcdd --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. 
+func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 0000000..f326b54 --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. 
+func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 0000000..e6ef06c --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. 
+ if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. 
+// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 0000000..5ea6c73 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 0000000..7f3643f --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,198 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. 
+func Parse(s string) (UUID, error) { + var uuid UUID + if len(s) != 36 { + if len(s) != 36+9 { + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + if len(b) != 36 { + if len(b) != 36+9 { + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + } + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. 
+func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst[:], uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. 
+func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 0000000..199a1ac --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nodeMu.Unlock() + + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + copy(uuid[10:], nodeID[:]) + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 0000000..84af91c --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + var uuid UUID + _, err := io.ReadFull(rander, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/pborman/uuid/.travis.yml b/vendor/github.com/pborman/uuid/.travis.yml new file mode 100644 index 0000000..3deb4a1 --- /dev/null +++ b/vendor/github.com/pborman/uuid/.travis.yml @@ -0,0 +1,10 @@ +language: go + +go: + - "1.9" + - "1.10" + - "1.11" + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTING.md b/vendor/github.com/pborman/uuid/CONTRIBUTING.md new file mode 100644 index 0000000..04fdf09 --- /dev/null +++ b/vendor/github.com/pborman/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. 
diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTORS b/vendor/github.com/pborman/uuid/CONTRIBUTORS new file mode 100644 index 0000000..b382a04 --- /dev/null +++ b/vendor/github.com/pborman/uuid/CONTRIBUTORS @@ -0,0 +1 @@ +Paul Borman diff --git a/vendor/github.com/pborman/uuid/LICENSE b/vendor/github.com/pborman/uuid/LICENSE new file mode 100644 index 0000000..5dc6826 --- /dev/null +++ b/vendor/github.com/pborman/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/pborman/uuid/README.md b/vendor/github.com/pborman/uuid/README.md new file mode 100644 index 0000000..810ad40 --- /dev/null +++ b/vendor/github.com/pborman/uuid/README.md @@ -0,0 +1,15 @@ +This project was automatically exported from code.google.com/p/go-uuid + +# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on [RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services. + +This package now leverages the github.com/google/uuid package (which is based off an earlier version of this package). + +###### Install +`go get github.com/pborman/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid) + +Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: +http://godoc.org/github.com/pborman/uuid diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go new file mode 100644 index 0000000..50a0f2d --- /dev/null +++ b/vendor/github.com/pborman/uuid/dce.go @@ -0,0 +1,84 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. 
+// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) UUID { + uuid := NewUUID() + if uuid != nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCEPerson(Person, uint32(os.Getuid())) +func NewDCEPerson() UUID { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCEGroup(Group, uint32(os.Getgid())) +func NewDCEGroup() UUID { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID or false. +func (uuid UUID) Domain() (Domain, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return Domain(uuid[9]), true +} + +// Id returns the id for a Version 2 UUID or false. +func (uuid UUID) Id() (uint32, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return binary.BigEndian.Uint32(uuid[0:4]), true +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go new file mode 100644 index 0000000..727d761 --- /dev/null +++ b/vendor/github.com/pborman/uuid/doc.go @@ -0,0 +1,13 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The uuid package generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. 
+// +// This package is a partial wrapper around the github.com/google/uuid package. +// This package represents a UUID as []byte while github.com/google/uuid +// represents a UUID as [16]byte. +package uuid diff --git a/vendor/github.com/pborman/uuid/go.mod b/vendor/github.com/pborman/uuid/go.mod new file mode 100644 index 0000000..099fc7d --- /dev/null +++ b/vendor/github.com/pborman/uuid/go.mod @@ -0,0 +1,3 @@ +module github.com/pborman/uuid + +require github.com/google/uuid v1.0.0 diff --git a/vendor/github.com/pborman/uuid/go.sum b/vendor/github.com/pborman/uuid/go.sum new file mode 100644 index 0000000..db2574a --- /dev/null +++ b/vendor/github.com/pborman/uuid/go.sum @@ -0,0 +1,2 @@ +github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go new file mode 100644 index 0000000..a0420c1 --- /dev/null +++ b/vendor/github.com/pborman/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known Name Space IDs and UUIDs +var ( + NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") + NIL = Parse("00000000-0000-0000-0000-000000000000") +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. 
NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space) + h.Write([]byte(data)) + s := h.Sum(nil) + uuid := make([]byte, 16) + copy(uuid, s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/pborman/uuid/marshal.go b/vendor/github.com/pborman/uuid/marshal.go new file mode 100644 index 0000000..35b8935 --- /dev/null +++ b/vendor/github.com/pborman/uuid/marshal.go @@ -0,0 +1,85 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "errors" + "fmt" + + guuid "github.com/google/uuid" +) + +// MarshalText implements encoding.TextMarshaler. +func (u UUID) MarshalText() ([]byte, error) { + if len(u) != 16 { + return nil, nil + } + var js [36]byte + encodeHex(js[:], u) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (u *UUID) UnmarshalText(data []byte) error { + if len(data) == 0 { + return nil + } + id := Parse(string(data)) + if id == nil { + return errors.New("invalid UUID") + } + *u = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (u UUID) MarshalBinary() ([]byte, error) { + return u[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (u *UUID) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + var id [16]byte + copy(id[:], data) + *u = id[:] + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (u Array) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], u[:]) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (u *Array) UnmarshalText(data []byte) error { + id, err := guuid.ParseBytes(data) + if err != nil { + return err + } + *u = Array(id) + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (u Array) MarshalBinary() ([]byte, error) { + return u[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (u *Array) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(u[:], data) + return nil +} diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go new file mode 100644 index 0000000..e524e01 --- /dev/null +++ b/vendor/github.com/pborman/uuid/node.go @@ -0,0 +1,50 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + guuid "github.com/google/uuid" +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + return guuid.NodeInterface() +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". 
+func SetNodeInterface(name string) bool { + return guuid.SetNodeInterface(name) +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + return guuid.NodeID() +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + return guuid.SetNodeID(id) +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + if len(uuid) != 16 { + return nil + } + node := make([]byte, 6) + copy(node, uuid[10:]) + return node +} diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go new file mode 100644 index 0000000..929c384 --- /dev/null +++ b/vendor/github.com/pborman/uuid/sql.go @@ -0,0 +1,68 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "errors" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. 
+func (uuid *UUID) Scan(src interface{}) error { + switch src.(type) { + case string: + // if an empty UUID comes from a table, we return a null UUID + if src.(string) == "" { + return nil + } + + // see uuid.Parse for required string format + parsed := Parse(src.(string)) + + if parsed == nil { + return errors.New("Scan: invalid UUID format") + } + + *uuid = parsed + case []byte: + b := src.([]byte) + + // if an empty UUID comes from a table, we return a null UUID + if len(b) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(b) == 16 { + parsed := make([]byte, 16) + copy(parsed, b) + *uuid = UUID(parsed) + } else { + u := Parse(string(b)) + + if u == nil { + return errors.New("Scan: invalid UUID format") + } + + *uuid = u + } + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go new file mode 100644 index 0000000..5c0960d --- /dev/null +++ b/vendor/github.com/pborman/uuid/time.go @@ -0,0 +1,57 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + + guuid "github.com/google/uuid" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time = guuid.Time + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. 
+func GetTime() (Time, uint16, error) { return guuid.GetTime() } + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence a new random +// clock sequence is generated the first time a clock sequence is requested by +// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated +// for +func ClockSequence() int { return guuid.ClockSequence() } + +// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { guuid.SetClockSequence(seq) } + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. It returns false if uuid is not valid. The time is only well defined +// for version 1 and 2 UUIDs. +func (uuid UUID) Time() (Time, bool) { + if len(uuid) != 16 { + return 0, false + } + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time), true +} + +// ClockSequence returns the clock sequence encoded in uuid. It returns false +// if uuid is not valid. The clock sequence is only well defined for version 1 +// and 2 UUIDs. +func (uuid UUID) ClockSequence() (int, bool) { + if len(uuid) != 16 { + return 0, false + } + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true +} diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go new file mode 100644 index 0000000..255b5e2 --- /dev/null +++ b/vendor/github.com/pborman/uuid/util.go @@ -0,0 +1,32 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts the the first two hex bytes of x into a byte. +func xtob(x string) (byte, bool) { + b1 := xvalues[x[0]] + b2 := xvalues[x[1]] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go new file mode 100644 index 0000000..3370004 --- /dev/null +++ b/vendor/github.com/pborman/uuid/uuid.go @@ -0,0 +1,162 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "io" + + guuid "github.com/google/uuid" +) + +// Array is a pass-by-value UUID that can be used as an effecient key in a map. +type Array [16]byte + +// UUID converts uuid into a slice. +func (uuid Array) UUID() UUID { + return uuid[:] +} + +// String returns the string representation of uuid, +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (uuid Array) String() string { + return guuid.UUID(uuid).String() +} + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID []byte + +// A Version represents a UUIDs version. +type Version = guuid.Version + +// A Variant represents a UUIDs variant. +type Variant = guuid.Variant + +// Constants returned by Variant. +const ( + Invalid = guuid.Invalid // Invalid UUID + RFC4122 = guuid.RFC4122 // The variant specified in RFC4122 + Reserved = guuid.Reserved // Reserved, NCS backward compatibility. + Microsoft = guuid.Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future = guuid.Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// New returns a new random (version 4) UUID as a string. It is a convenience +// function for NewRandom().String(). +func New() string { + return NewRandom().String() +} + +// Parse decodes s into a UUID or returns nil. See github.com/google/uuid for +// the formats parsed. +func Parse(s string) UUID { + gu, err := guuid.Parse(s) + if err == nil { + return gu[:] + } + return nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + gu, err := guuid.ParseBytes(b) + if err == nil { + return gu[:], nil + } + return nil, err +} + +// Equal returns true if uuid1 and uuid2 are equal. +func Equal(uuid1, uuid2 UUID) bool { + return bytes.Equal(uuid1, uuid2) +} + +// Array returns an array representation of uuid that can be used as a map key. 
+// Array panics if uuid is not valid. +func (uuid UUID) Array() Array { + if len(uuid) != 16 { + panic("invalid uuid") + } + var a Array + copy(a[:], uuid) + return a +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + if len(uuid) != 16 { + return "" + } + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + if len(uuid) != 16 { + return "" + } + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst[:], uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. It returns Invalid if +// uuid is invalid. +func (uuid UUID) Variant() Variant { + if len(uuid) != 16 { + return Invalid + } + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. It returns false if uuid is not +// valid. +func (uuid UUID) Version() (Version, bool) { + if len(uuid) != 16 { + return 0, false + } + return Version(uuid[6] >> 4), true +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. 
+func SetRand(r io.Reader) { + guuid.SetRand(r) +} diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go new file mode 100644 index 0000000..7af948d --- /dev/null +++ b/vendor/github.com/pborman/uuid/version1.go @@ -0,0 +1,23 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + guuid "github.com/google/uuid" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil. +func NewUUID() UUID { + gu, err := guuid.NewUUID() + if err == nil { + return UUID(gu[:]) + } + return nil +} diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go new file mode 100644 index 0000000..b459d46 --- /dev/null +++ b/vendor/github.com/pborman/uuid/version4.go @@ -0,0 +1,26 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import guuid "github.com/google/uuid" + +// Random returns a Random (Version 4) UUID or panics. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. 
One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() UUID { + if gu, err := guuid.NewRandom(); err == nil { + return UUID(gu[:]) + } + return nil +} diff --git a/vendor/github.com/streadway/amqp/.gitignore b/vendor/github.com/streadway/amqp/.gitignore new file mode 100644 index 0000000..667fb50 --- /dev/null +++ b/vendor/github.com/streadway/amqp/.gitignore @@ -0,0 +1,12 @@ +certs/* +spec/spec +examples/simple-consumer/simple-consumer +examples/simple-producer/simple-producer + +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +.idea/**/contentModel.xml diff --git a/vendor/github.com/streadway/amqp/.travis.yml b/vendor/github.com/streadway/amqp/.travis.yml new file mode 100644 index 0000000..7eee262 --- /dev/null +++ b/vendor/github.com/streadway/amqp/.travis.yml @@ -0,0 +1,25 @@ +language: go + +go: + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + +addons: + apt: + packages: + - rabbitmq-server + +services: + - rabbitmq + +env: + - GO111MODULE=on AMQP_URL=amqp://guest:guest@127.0.0.1:5672/ + +before_install: + - go get -v golang.org/x/lint/golint + +script: + - ./pre-commit + - go test -cpu=1,2 -v -tags integration ./... diff --git a/vendor/github.com/streadway/amqp/CONTRIBUTING.md b/vendor/github.com/streadway/amqp/CONTRIBUTING.md new file mode 100644 index 0000000..c87f3d7 --- /dev/null +++ b/vendor/github.com/streadway/amqp/CONTRIBUTING.md @@ -0,0 +1,35 @@ +## Prequisites + +1. Go: [https://golang.org/dl/](https://golang.org/dl/) +1. Golint `go get -u -v github.com/golang/lint/golint` + +## Contributing + +The workflow is pretty standard: + +1. Fork github.com/streadway/amqp +1. Add the pre-commit hook: `ln -s ../../pre-commit .git/hooks/pre-commit` +1. 
Create your feature branch (`git checkout -b my-new-feature`) +1. Run integration tests (see below) +1. **Implement tests** +1. Implement fixs +1. Commit your changes (`git commit -am 'Add some feature'`) +1. Push to a branch (`git push -u origin my-new-feature`) +1. Submit a pull request + +## Running Tests + +The test suite assumes that: + + * A RabbitMQ node is running on localhost with all defaults: [https://www.rabbitmq.com/download.html](https://www.rabbitmq.com/download.html) + * `AMQP_URL` is exported to `amqp://guest:guest@127.0.0.1:5672/` + +### Integration Tests + +After starting a local RabbitMQ, run integration tests with the following: + + env AMQP_URL=amqp://guest:guest@127.0.0.1:5672/ go test -v -cpu 2 -tags integration -race + +All integration tests should use the `integrationConnection(...)` test +helpers defined in `integration_test.go` to setup the integration environment +and logging. diff --git a/vendor/github.com/streadway/amqp/LICENSE b/vendor/github.com/streadway/amqp/LICENSE new file mode 100644 index 0000000..07b8968 --- /dev/null +++ b/vendor/github.com/streadway/amqp/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2012-2019, Sean Treadway, SoundCloud Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/streadway/amqp/README.md b/vendor/github.com/streadway/amqp/README.md new file mode 100644 index 0000000..287830b --- /dev/null +++ b/vendor/github.com/streadway/amqp/README.md @@ -0,0 +1,93 @@ +[![Build Status](https://api.travis-ci.org/streadway/amqp.svg)](http://travis-ci.org/streadway/amqp) [![GoDoc](https://godoc.org/github.com/streadway/amqp?status.svg)](http://godoc.org/github.com/streadway/amqp) + +# Go RabbitMQ Client Library + +This is an AMQP 0.9.1 client with RabbitMQ extensions in Go. + +## Project Maturity + +This project has been used in production systems for many years. It is reasonably mature +and feature complete, and as of November 2016 has [a team of maintainers](https://github.com/streadway/amqp/issues/215). + +Future API changes are unlikely but possible. They will be discussed on [Github +issues](https://github.com/streadway/amqp/issues) along with any bugs or +enhancements. + +## Supported Go Versions + +This library supports two most recent Go release series, currently 1.10 and 1.11. + + +## Supported RabbitMQ Versions + +This project supports RabbitMQ versions starting with `2.0` but primarily tested +against reasonably recent `3.x` releases. Some features and behaviours may be +server version-specific. + +## Goals + +Provide a functional interface that closely represents the AMQP 0.9.1 model +targeted to RabbitMQ as a server. 
This includes the minimum necessary to +interact the semantics of the protocol. + +## Non-goals + +Things not intended to be supported. + + * Auto reconnect and re-synchronization of client and server topologies. + * Reconnection would require understanding the error paths when the + topology cannot be declared on reconnect. This would require a new set + of types and code paths that are best suited at the call-site of this + package. AMQP has a dynamic topology that needs all peers to agree. If + this doesn't happen, the behavior is undefined. Instead of producing a + possible interface with undefined behavior, this package is designed to + be simple for the caller to implement the necessary connection-time + topology declaration so that reconnection is trivial and encapsulated in + the caller's application code. + * AMQP Protocol negotiation for forward or backward compatibility. + * 0.9.1 is stable and widely deployed. Versions 0.10 and 1.0 are divergent + specifications that change the semantics and wire format of the protocol. + We will accept patches for other protocol support but have no plans for + implementation ourselves. + * Anything other than PLAIN and EXTERNAL authentication mechanisms. + * Keeping the mechanisms interface modular makes it possible to extend + outside of this package. If other mechanisms prove to be popular, then + we would accept patches to include them in this package. + +## Usage + +See the 'examples' subdirectory for simple producers and consumers executables. +If you have a use-case in mind which isn't well-represented by the examples, +please file an issue. + +## Documentation + +Use [Godoc documentation](http://godoc.org/github.com/streadway/amqp) for +reference and usage. + +[RabbitMQ tutorials in +Go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go) are also +available. + +## Contributing + +Pull requests are very much welcomed. 
Create your pull request on a non-master +branch, make sure a test or example is included that covers your change and +your commits represent coherent changes that include a reason for the change. + +To run the integration tests, make sure you have RabbitMQ running on any host, +export the environment variable `AMQP_URL=amqp://host/` and run `go test -tags +integration`. TravisCI will also run the integration tests. + +Thanks to the [community of contributors](https://github.com/streadway/amqp/graphs/contributors). + +## External packages + + * [Google App Engine Dialer support](https://github.com/soundtrackyourbrand/gaeamqp) + * [RabbitMQ examples in Go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go) + +## License + +BSD 2 clause - see LICENSE for more details. + + diff --git a/vendor/github.com/streadway/amqp/allocator.go b/vendor/github.com/streadway/amqp/allocator.go new file mode 100644 index 0000000..53620e7 --- /dev/null +++ b/vendor/github.com/streadway/amqp/allocator.go @@ -0,0 +1,106 @@ +package amqp + +import ( + "bytes" + "fmt" + "math/big" +) + +const ( + free = 0 + allocated = 1 +) + +// allocator maintains a bitset of allocated numbers. +type allocator struct { + pool *big.Int + last int + low int + high int +} + +// NewAllocator reserves and frees integers out of a range between low and +// high. 
+// +// O(N) worst case space used, where N is maximum allocated, divided by +// sizeof(big.Word) +func newAllocator(low, high int) *allocator { + return &allocator{ + pool: big.NewInt(0), + last: low, + low: low, + high: high, + } +} + +// String returns a string describing the contents of the allocator like +// "allocator[low..high] reserved..until" +// +// O(N) where N is high-low +func (a allocator) String() string { + b := &bytes.Buffer{} + fmt.Fprintf(b, "allocator[%d..%d]", a.low, a.high) + + for low := a.low; low <= a.high; low++ { + high := low + for a.reserved(high) && high <= a.high { + high++ + } + + if high > low+1 { + fmt.Fprintf(b, " %d..%d", low, high-1) + } else if high > low { + fmt.Fprintf(b, " %d", high-1) + } + + low = high + } + return b.String() +} + +// Next reserves and returns the next available number out of the range between +// low and high. If no number is available, false is returned. +// +// O(N) worst case runtime where N is allocated, but usually O(1) due to a +// rolling index into the oldest allocation. +func (a *allocator) next() (int, bool) { + wrapped := a.last + + // Find trailing bit + for ; a.last <= a.high; a.last++ { + if a.reserve(a.last) { + return a.last, true + } + } + + // Find preceding free'd pool + a.last = a.low + + for ; a.last < wrapped; a.last++ { + if a.reserve(a.last) { + return a.last, true + } + } + + return 0, false +} + +// reserve claims the bit if it is not already claimed, returning true if +// successfully claimed. 
+func (a *allocator) reserve(n int) bool { + if a.reserved(n) { + return false + } + a.pool.SetBit(a.pool, n-a.low, allocated) + return true +} + +// reserved returns true if the integer has been allocated +func (a *allocator) reserved(n int) bool { + return a.pool.Bit(n-a.low) == allocated +} + +// release frees the use of the number for another allocation +func (a *allocator) release(n int) { + a.pool.SetBit(a.pool, n-a.low, free) +} diff --git a/vendor/github.com/streadway/amqp/auth.go b/vendor/github.com/streadway/amqp/auth.go new file mode 100644 index 0000000..435c94b --- /dev/null +++ b/vendor/github.com/streadway/amqp/auth.go @@ -0,0 +1,62 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "fmt" +) + +// Authentication interface provides a means for different SASL authentication +// mechanisms to be used during connection tuning. +type Authentication interface { + Mechanism() string + Response() string +} + +// PlainAuth is a similar to Basic Auth in HTTP. +type PlainAuth struct { + Username string + Password string +} + +// Mechanism returns "PLAIN" +func (auth *PlainAuth) Mechanism() string { + return "PLAIN" +} + +// Response returns the null character delimited encoding for the SASL PLAIN Mechanism. +func (auth *PlainAuth) Response() string { + return fmt.Sprintf("\000%s\000%s", auth.Username, auth.Password) +} + +// AMQPlainAuth is similar to PlainAuth +type AMQPlainAuth struct { + Username string + Password string +} + +// Mechanism returns "AMQPLAIN" +func (auth *AMQPlainAuth) Mechanism() string { + return "AMQPLAIN" +} + +// Response returns the null character delimited encoding for the SASL PLAIN Mechanism. 
+func (auth *AMQPlainAuth) Response() string { + return fmt.Sprintf("LOGIN:%sPASSWORD:%s", auth.Username, auth.Password) +} + +// Finds the first mechanism preferred by the client that the server supports. +func pickSASLMechanism(client []Authentication, serverMechanisms []string) (auth Authentication, ok bool) { + for _, auth = range client { + for _, mech := range serverMechanisms { + if auth.Mechanism() == mech { + return auth, true + } + } + } + + return +} diff --git a/vendor/github.com/streadway/amqp/certs.sh b/vendor/github.com/streadway/amqp/certs.sh new file mode 100644 index 0000000..834f422 --- /dev/null +++ b/vendor/github.com/streadway/amqp/certs.sh @@ -0,0 +1,159 @@ +#!/bin/sh +# +# Creates the CA, server and client certs to be used by tls_test.go +# http://www.rabbitmq.com/ssl.html +# +# Copy stdout into the const section of tls_test.go or use for RabbitMQ +# +root=$PWD/certs + +if [ -f $root/ca/serial ]; then + echo >&2 "Previous installation found" + echo >&2 "Remove $root/ca and rerun to overwrite" + exit 1 +fi + +mkdir -p $root/ca/private +mkdir -p $root/ca/certs +mkdir -p $root/server +mkdir -p $root/client + +cd $root/ca + +chmod 700 private +touch index.txt +echo 'unique_subject = no' > index.txt.attr +echo '01' > serial +echo >openssl.cnf ' +[ ca ] +default_ca = testca + +[ testca ] +dir = . 
+certificate = $dir/cacert.pem +database = $dir/index.txt +new_certs_dir = $dir/certs +private_key = $dir/private/cakey.pem +serial = $dir/serial + +default_crl_days = 7 +default_days = 3650 +default_md = sha1 + +policy = testca_policy +x509_extensions = certificate_extensions + +[ testca_policy ] +commonName = supplied +stateOrProvinceName = optional +countryName = optional +emailAddress = optional +organizationName = optional +organizationalUnitName = optional + +[ certificate_extensions ] +basicConstraints = CA:false + +[ req ] +default_bits = 2048 +default_keyfile = ./private/cakey.pem +default_md = sha1 +prompt = yes +distinguished_name = root_ca_distinguished_name +x509_extensions = root_ca_extensions + +[ root_ca_distinguished_name ] +commonName = hostname + +[ root_ca_extensions ] +basicConstraints = CA:true +keyUsage = keyCertSign, cRLSign + +[ client_ca_extensions ] +basicConstraints = CA:false +keyUsage = digitalSignature +extendedKeyUsage = 1.3.6.1.5.5.7.3.2 + +[ server_ca_extensions ] +basicConstraints = CA:false +keyUsage = keyEncipherment +extendedKeyUsage = 1.3.6.1.5.5.7.3.1 +subjectAltName = @alt_names + +[ alt_names ] +IP.1 = 127.0.0.1 +' + +openssl req \ + -x509 \ + -nodes \ + -config openssl.cnf \ + -newkey rsa:2048 \ + -days 3650 \ + -subj "/CN=MyTestCA/" \ + -out cacert.pem \ + -outform PEM + +openssl x509 \ + -in cacert.pem \ + -out cacert.cer \ + -outform DER + +openssl genrsa -out $root/server/key.pem 2048 +openssl genrsa -out $root/client/key.pem 2048 + +openssl req \ + -new \ + -nodes \ + -config openssl.cnf \ + -subj "/CN=127.0.0.1/O=server/" \ + -key $root/server/key.pem \ + -out $root/server/req.pem \ + -outform PEM + +openssl req \ + -new \ + -nodes \ + -config openssl.cnf \ + -subj "/CN=127.0.0.1/O=client/" \ + -key $root/client/key.pem \ + -out $root/client/req.pem \ + -outform PEM + +openssl ca \ + -config openssl.cnf \ + -in $root/server/req.pem \ + -out $root/server/cert.pem \ + -notext \ + -batch \ + -extensions 
server_ca_extensions + +openssl ca \ + -config openssl.cnf \ + -in $root/client/req.pem \ + -out $root/client/cert.pem \ + -notext \ + -batch \ + -extensions client_ca_extensions + +cat <<-END +const caCert = \` +`cat $root/ca/cacert.pem` +\` + +const serverCert = \` +`cat $root/server/cert.pem` +\` + +const serverKey = \` +`cat $root/server/key.pem` +\` + +const clientCert = \` +`cat $root/client/cert.pem` +\` + +const clientKey = \` +`cat $root/client/key.pem` +\` +END diff --git a/vendor/github.com/streadway/amqp/channel.go b/vendor/github.com/streadway/amqp/channel.go new file mode 100644 index 0000000..cd19ce7 --- /dev/null +++ b/vendor/github.com/streadway/amqp/channel.go @@ -0,0 +1,1593 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "reflect" + "sync" + "sync/atomic" +) + +// 0 1 3 7 size+7 size+8 +// +------+---------+-------------+ +------------+ +-----------+ +// | type | channel | size | | payload | | frame-end | +// +------+---------+-------------+ +------------+ +-----------+ +// octet short long size octets octet +const frameHeaderSize = 1 + 2 + 4 + 1 + +/* +Channel represents an AMQP channel. Used as a context for valid message +exchange. Errors on methods with this Channel as a receiver means this channel +should be discarded and a new channel established. + +*/ +type Channel struct { + destructor sync.Once + m sync.Mutex // struct field mutex + confirmM sync.Mutex // publisher confirms state mutex + notifyM sync.RWMutex + + connection *Connection + + rpc chan message + consumers *consumers + + id uint16 + + // closed is set to 1 when the channel has been closed - see Channel.send() + closed int32 + + // true when we will never notify again + noNotify bool + + // Channel and Connection exceptions will be broadcast on these listeners. 
+ closes []chan *Error + + // Listeners for active=true flow control. When true is sent to a listener, + // publishing should pause until false is sent to listeners. + flows []chan bool + + // Listeners for returned publishings for unroutable messages on mandatory + // publishings or undeliverable messages on immediate publishings. + returns []chan Return + + // Listeners for when the server notifies the client that + // a consumer has been cancelled. + cancels []chan string + + // Allocated when in confirm mode in order to track publish counter and order confirms + confirms *confirms + confirming bool + + // Selects on any errors from shutdown during RPC + errors chan *Error + + // State machine that manages frame order, must only be mutated by the connection + recv func(*Channel, frame) error + + // Current state for frame re-assembly, only mutated from recv + message messageWithContent + header *headerFrame + body []byte +} + +// Constructs a new channel with the given framing rules +func newChannel(c *Connection, id uint16) *Channel { + return &Channel{ + connection: c, + id: id, + rpc: make(chan message), + consumers: makeConsumers(), + confirms: newConfirms(), + recv: (*Channel).recvMethod, + errors: make(chan *Error, 1), + } +} + +// shutdown is called by Connection after the channel has been removed from the +// connection registry. 
+func (ch *Channel) shutdown(e *Error) { + ch.destructor.Do(func() { + ch.m.Lock() + defer ch.m.Unlock() + + // Grab an exclusive lock for the notify channels + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + // Broadcast abnormal shutdown + if e != nil { + for _, c := range ch.closes { + c <- e + } + } + + // Signal that from now on, Channel.send() should call + // Channel.sendClosed() + atomic.StoreInt32(&ch.closed, 1) + + // Notify RPC if we're selecting + if e != nil { + ch.errors <- e + } + + ch.consumers.close() + + for _, c := range ch.closes { + close(c) + } + + for _, c := range ch.flows { + close(c) + } + + for _, c := range ch.returns { + close(c) + } + + for _, c := range ch.cancels { + close(c) + } + + // Set the slices to nil to prevent the dispatch() range from sending on + // the now closed channels after we release the notifyM mutex + ch.flows = nil + ch.closes = nil + ch.returns = nil + ch.cancels = nil + + if ch.confirms != nil { + ch.confirms.Close() + } + + close(ch.errors) + ch.noNotify = true + }) +} + +// send calls Channel.sendOpen() during normal operation. +// +// After the channel has been closed, send calls Channel.sendClosed(), ensuring +// only 'channel.close' is sent to the server. +func (ch *Channel) send(msg message) (err error) { + // If the channel is closed, use Channel.sendClosed() + if atomic.LoadInt32(&ch.closed) == 1 { + return ch.sendClosed(msg) + } + + return ch.sendOpen(msg) +} + +func (ch *Channel) open() error { + return ch.call(&channelOpen{}, &channelOpenOk{}) +} + +// Performs a request/response call for when the message is not NoWait and is +// specified as Synchronous. 
+func (ch *Channel) call(req message, res ...message) error { + if err := ch.send(req); err != nil { + return err + } + + if req.wait() { + select { + case e, ok := <-ch.errors: + if ok { + return e + } + return ErrClosed + + case msg := <-ch.rpc: + if msg != nil { + for _, try := range res { + if reflect.TypeOf(msg) == reflect.TypeOf(try) { + // *res = *msg + vres := reflect.ValueOf(try).Elem() + vmsg := reflect.ValueOf(msg).Elem() + vres.Set(vmsg) + return nil + } + } + return ErrCommandInvalid + } + // RPC channel has been closed without an error, likely due to a hard + // error on the Connection. This indicates we have already been + // shutdown and if were waiting, will have returned from the errors chan. + return ErrClosed + } + } + + return nil +} + +func (ch *Channel) sendClosed(msg message) (err error) { + // After a 'channel.close' is sent or received the only valid response is + // channel.close-ok + if _, ok := msg.(*channelCloseOk); ok { + return ch.connection.send(&methodFrame{ + ChannelId: ch.id, + Method: msg, + }) + } + + return ErrClosed +} + +func (ch *Channel) sendOpen(msg message) (err error) { + if content, ok := msg.(messageWithContent); ok { + props, body := content.getContent() + class, _ := content.id() + + // catch client max frame size==0 and server max frame size==0 + // set size to length of what we're trying to publish + var size int + if ch.connection.Config.FrameSize > 0 { + size = ch.connection.Config.FrameSize - frameHeaderSize + } else { + size = len(body) + } + + if err = ch.connection.send(&methodFrame{ + ChannelId: ch.id, + Method: content, + }); err != nil { + return + } + + if err = ch.connection.send(&headerFrame{ + ChannelId: ch.id, + ClassId: class, + Size: uint64(len(body)), + Properties: props, + }); err != nil { + return + } + + // chunk body into size (max frame size - frame header size) + for i, j := 0, size; i < len(body); i, j = j, j+size { + if j > len(body) { + j = len(body) + } + + if err = 
ch.connection.send(&bodyFrame{ + ChannelId: ch.id, + Body: body[i:j], + }); err != nil { + return + } + } + } else { + err = ch.connection.send(&methodFrame{ + ChannelId: ch.id, + Method: msg, + }) + } + + return +} + +// Eventually called via the state machine from the connection's reader +// goroutine, so assumes serialized access. +func (ch *Channel) dispatch(msg message) { + switch m := msg.(type) { + case *channelClose: + // lock before sending connection.close-ok + // to avoid unexpected interleaving with basic.publish frames if + // publishing is happening concurrently + ch.m.Lock() + ch.send(&channelCloseOk{}) + ch.m.Unlock() + ch.connection.closeChannel(ch, newError(m.ReplyCode, m.ReplyText)) + + case *channelFlow: + ch.notifyM.RLock() + for _, c := range ch.flows { + c <- m.Active + } + ch.notifyM.RUnlock() + ch.send(&channelFlowOk{Active: m.Active}) + + case *basicCancel: + ch.notifyM.RLock() + for _, c := range ch.cancels { + c <- m.ConsumerTag + } + ch.notifyM.RUnlock() + ch.consumers.cancel(m.ConsumerTag) + + case *basicReturn: + ret := newReturn(*m) + ch.notifyM.RLock() + for _, c := range ch.returns { + c <- *ret + } + ch.notifyM.RUnlock() + + case *basicAck: + if ch.confirming { + if m.Multiple { + ch.confirms.Multiple(Confirmation{m.DeliveryTag, true}) + } else { + ch.confirms.One(Confirmation{m.DeliveryTag, true}) + } + } + + case *basicNack: + if ch.confirming { + if m.Multiple { + ch.confirms.Multiple(Confirmation{m.DeliveryTag, false}) + } else { + ch.confirms.One(Confirmation{m.DeliveryTag, false}) + } + } + + case *basicDeliver: + ch.consumers.send(m.ConsumerTag, newDelivery(ch, m)) + // TODO log failed consumer and close channel, this can happen when + // deliveries are in flight and a no-wait cancel has happened + + default: + ch.rpc <- msg + } +} + +func (ch *Channel) transition(f func(*Channel, frame) error) error { + ch.recv = f + return nil +} + +func (ch *Channel) recvMethod(f frame) error { + switch frame := f.(type) { + case 
*methodFrame: + if msg, ok := frame.Method.(messageWithContent); ok { + ch.body = make([]byte, 0) + ch.message = msg + return ch.transition((*Channel).recvHeader) + } + + ch.dispatch(frame.Method) // termination state + return ch.transition((*Channel).recvMethod) + + case *headerFrame: + // drop + return ch.transition((*Channel).recvMethod) + + case *bodyFrame: + // drop + return ch.transition((*Channel).recvMethod) + } + + panic("unexpected frame type") +} + +func (ch *Channel) recvHeader(f frame) error { + switch frame := f.(type) { + case *methodFrame: + // interrupt content and handle method + return ch.recvMethod(f) + + case *headerFrame: + // start collecting if we expect body frames + ch.header = frame + + if frame.Size == 0 { + ch.message.setContent(ch.header.Properties, ch.body) + ch.dispatch(ch.message) // termination state + return ch.transition((*Channel).recvMethod) + } + return ch.transition((*Channel).recvContent) + + case *bodyFrame: + // drop and reset + return ch.transition((*Channel).recvMethod) + } + + panic("unexpected frame type") +} + +// state after method + header and before the length +// defined by the header has been reached +func (ch *Channel) recvContent(f frame) error { + switch frame := f.(type) { + case *methodFrame: + // interrupt content and handle method + return ch.recvMethod(f) + + case *headerFrame: + // drop and reset + return ch.transition((*Channel).recvMethod) + + case *bodyFrame: + if cap(ch.body) == 0 { + ch.body = make([]byte, 0, ch.header.Size) + } + ch.body = append(ch.body, frame.Body...) + + if uint64(len(ch.body)) >= ch.header.Size { + ch.message.setContent(ch.header.Properties, ch.body) + ch.dispatch(ch.message) // termination state + return ch.transition((*Channel).recvMethod) + } + + return ch.transition((*Channel).recvContent) + } + + panic("unexpected frame type") +} + +/* +Close initiate a clean channel closure by sending a close message with the error +code set to '200'. 
+ +It is safe to call this method multiple times. + +*/ +func (ch *Channel) Close() error { + defer ch.connection.closeChannel(ch, nil) + return ch.call( + &channelClose{ReplyCode: replySuccess}, + &channelCloseOk{}, + ) +} + +/* +NotifyClose registers a listener for when the server sends a channel or +connection exception in the form of a Connection.Close or Channel.Close method. +Connection exceptions will be broadcast to all open channels and all channels +will be closed, where channel exceptions will only be broadcast to listeners to +this channel. + +The chan provided will be closed when the Channel is closed and on a +graceful close, no error will be sent. + +*/ +func (ch *Channel) NotifyClose(c chan *Error) chan *Error { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(c) + } else { + ch.closes = append(ch.closes, c) + } + + return c +} + +/* +NotifyFlow registers a listener for basic.flow methods sent by the server. +When `false` is sent on one of the listener channels, all publishers should +pause until a `true` is sent. + +The server may ask the producer to pause or restart the flow of Publishings +sent by on a channel. This is a simple flow-control mechanism that a server can +use to avoid overflowing its queues or otherwise finding itself receiving more +messages than it can process. Note that this method is not intended for window +control. It does not affect contents returned by basic.get-ok methods. + +When a new channel is opened, it is active (flow is active). Some +applications assume that channels are inactive until started. To emulate +this behavior a client MAY open the channel, then pause it. + +Publishers should respond to a flow messages as rapidly as possible and the +server may disconnect over producing channels that do not respect these +messages. + +basic.flow-ok methods will always be returned to the server regardless of +the number of listeners there are. 
+ +To control the flow of deliveries from the server, use the Channel.Flow() +method instead. + +Note: RabbitMQ will rather use TCP pushback on the network connection instead +of sending basic.flow. This means that if a single channel is producing too +much on the same connection, all channels using that connection will suffer, +including acknowledgments from deliveries. Use different Connections if you +desire to interleave consumers and producers in the same process to avoid your +basic.ack messages from getting rate limited with your basic.publish messages. + +*/ +func (ch *Channel) NotifyFlow(c chan bool) chan bool { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(c) + } else { + ch.flows = append(ch.flows, c) + } + + return c +} + +/* +NotifyReturn registers a listener for basic.return methods. These can be sent +from the server when a publish is undeliverable either from the mandatory or +immediate flags. + +A return struct has a copy of the Publishing along with some error +information about why the publishing failed. + +*/ +func (ch *Channel) NotifyReturn(c chan Return) chan Return { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(c) + } else { + ch.returns = append(ch.returns, c) + } + + return c +} + +/* +NotifyCancel registers a listener for basic.cancel methods. These can be sent +from the server when a queue is deleted or when consuming from a mirrored queue +where the master has just failed (and was moved to another node). + +The subscription tag is returned to the listener. + +*/ +func (ch *Channel) NotifyCancel(c chan string) chan string { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(c) + } else { + ch.cancels = append(ch.cancels, c) + } + + return c +} + +/* +NotifyConfirm calls NotifyPublish and starts a goroutine sending +ordered Ack and Nack DeliveryTag to the respective channels. + +For strict ordering, use NotifyPublish instead. 
+*/ +func (ch *Channel) NotifyConfirm(ack, nack chan uint64) (chan uint64, chan uint64) { + confirms := ch.NotifyPublish(make(chan Confirmation, cap(ack)+cap(nack))) + + go func() { + for c := range confirms { + if c.Ack { + ack <- c.DeliveryTag + } else { + nack <- c.DeliveryTag + } + } + close(ack) + if nack != ack { + close(nack) + } + }() + + return ack, nack +} + +/* +NotifyPublish registers a listener for reliable publishing. Receives from this +chan for every publish after Channel.Confirm will be in order starting with +DeliveryTag 1. + +There will be one and only one Confirmation Publishing starting with the +delivery tag of 1 and progressing sequentially until the total number of +Publishings have been seen by the server. + +Acknowledgments will be received in the order of delivery from the +NotifyPublish channels even if the server acknowledges them out of order. + +The listener chan will be closed when the Channel is closed. + +The capacity of the chan Confirmation must be at least as large as the +number of outstanding publishings. Not having enough buffered chans will +create a deadlock if you attempt to perform other operations on the Connection +or Channel while confirms are in-flight. + +It's advisable to wait for all Confirmations to arrive before calling +Channel.Close() or Connection.Close(). + +*/ +func (ch *Channel) NotifyPublish(confirm chan Confirmation) chan Confirmation { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(confirm) + } else { + ch.confirms.Listen(confirm) + } + + return confirm + +} + +/* +Qos controls how many messages or how many bytes the server will try to keep on +the network for consumers before receiving delivery acks. The intent of Qos is +to make sure the network buffers stay full between the server and client. + +With a prefetch count greater than zero, the server will deliver that many +messages to consumers before acknowledgments are received. 
The server ignores +this option when consumers are started with noAck because no acknowledgments +are expected or sent. + +With a prefetch size greater than zero, the server will try to keep at least +that many bytes of deliveries flushed to the network before receiving +acknowledgments from the consumers. This option is ignored when consumers are +started with noAck. + +When global is true, these Qos settings apply to all existing and future +consumers on all channels on the same connection. When false, the Channel.Qos +settings will apply to all existing and future consumers on this channel. + +Please see the RabbitMQ Consumer Prefetch documentation for an explanation of +how the global flag is implemented in RabbitMQ, as it differs from the +AMQP 0.9.1 specification in that global Qos settings are limited in scope to +channels, not connections (https://www.rabbitmq.com/consumer-prefetch.html). + +To get round-robin behavior between consumers consuming from the same queue on +different connections, set the prefetch count to 1, and the next available +message on the server will be delivered to the next available consumer. + +If your consumer work time is reasonably consistent and not much greater +than two times your network round trip time, you will see significant +throughput improvements starting with a prefetch count of 2 or slightly +greater as described by benchmarks on RabbitMQ. + +http://www.rabbitmq.com/blog/2012/04/25/rabbitmq-performance-measurements-part-2/ +*/ +func (ch *Channel) Qos(prefetchCount, prefetchSize int, global bool) error { + return ch.call( + &basicQos{ + PrefetchCount: uint16(prefetchCount), + PrefetchSize: uint32(prefetchSize), + Global: global, + }, + &basicQosOk{}, + ) +} + +/* +Cancel stops deliveries to the consumer chan established in Channel.Consume and +identified by consumer. + +Only use this method to cleanly stop receiving deliveries from the server and +cleanly shut down the consumer chan identified by this tag. 
Using this method +and waiting for remaining messages to flush from the consumer chan will ensure +all messages received on the network will be delivered to the receiver of your +consumer chan. + +Continue consuming from the chan Delivery provided by Channel.Consume until the +chan closes. + +When noWait is true, do not wait for the server to acknowledge the cancel. +Only use this when you are certain there are no deliveries in flight that +require an acknowledgment, otherwise they will arrive and be dropped in the +client without an ack, and will not be redelivered to other consumers. + +*/ +func (ch *Channel) Cancel(consumer string, noWait bool) error { + req := &basicCancel{ + ConsumerTag: consumer, + NoWait: noWait, + } + res := &basicCancelOk{} + + if err := ch.call(req, res); err != nil { + return err + } + + if req.wait() { + ch.consumers.cancel(res.ConsumerTag) + } else { + // Potentially could drop deliveries in flight + ch.consumers.cancel(consumer) + } + + return nil +} + +/* +QueueDeclare declares a queue to hold messages and deliver to consumers. +Declaring creates a queue if it doesn't already exist, or ensures that an +existing queue matches the same parameters. + +Every queue declared gets a default binding to the empty exchange "" which has +the type "direct" with the routing key matching the queue's name. With this +default binding, it is possible to publish messages that route directly to +this queue by publishing to "" with the routing key of the queue name. + + QueueDeclare("alerts", true, false, false, false, nil) + Publish("", "alerts", false, false, Publishing{Body: []byte("...")}) + + Delivery Exchange Key Queue + ----------------------------------------------- + key: alerts -> "" -> alerts -> alerts + +The queue name may be empty, in which case the server will generate a unique name +which will be returned in the Name field of Queue struct. 
+ +Durable and Non-Auto-Deleted queues will survive server restarts and remain +when there are no remaining consumers or bindings. Persistent publishings will +be restored in this queue on server restart. These queues are only able to be +bound to durable exchanges. + +Non-Durable and Auto-Deleted queues will not be redeclared on server restart +and will be deleted by the server after a short time when the last consumer is +canceled or the last consumer's channel is closed. Queues with this lifetime +can also be deleted normally with QueueDelete. These durable queues can only +be bound to non-durable exchanges. + +Non-Durable and Non-Auto-Deleted queues will remain declared as long as the +server is running regardless of how many consumers. This lifetime is useful +for temporary topologies that may have long delays between consumer activity. +These queues can only be bound to non-durable exchanges. + +Durable and Auto-Deleted queues will be restored on server restart, but without +active consumers will not survive and be removed. This Lifetime is unlikely +to be useful. + +Exclusive queues are only accessible by the connection that declares them and +will be deleted when the connection closes. Channels on other connections +will receive an error when attempting to declare, bind, consume, purge or +delete a queue with the same name. + +When noWait is true, the queue will assume to be declared on the server. A +channel exception will arrive if the conditions are met for existing queues +or attempting to modify an existing queue from a different connection. + +When the error return value is not nil, you can assume the queue could not be +declared with these parameters, and the channel will be closed. 
+ +*/ +func (ch *Channel) QueueDeclare(name string, durable, autoDelete, exclusive, noWait bool, args Table) (Queue, error) { + if err := args.Validate(); err != nil { + return Queue{}, err + } + + req := &queueDeclare{ + Queue: name, + Passive: false, + Durable: durable, + AutoDelete: autoDelete, + Exclusive: exclusive, + NoWait: noWait, + Arguments: args, + } + res := &queueDeclareOk{} + + if err := ch.call(req, res); err != nil { + return Queue{}, err + } + + if req.wait() { + return Queue{ + Name: res.Queue, + Messages: int(res.MessageCount), + Consumers: int(res.ConsumerCount), + }, nil + } + + return Queue{Name: name}, nil +} + +/* + +QueueDeclarePassive is functionally and parametrically equivalent to +QueueDeclare, except that it sets the "passive" attribute to true. A passive +queue is assumed by RabbitMQ to already exist, and attempting to connect to a +non-existent queue will cause RabbitMQ to throw an exception. This function +can be used to test for the existence of a queue. + +*/ +func (ch *Channel) QueueDeclarePassive(name string, durable, autoDelete, exclusive, noWait bool, args Table) (Queue, error) { + if err := args.Validate(); err != nil { + return Queue{}, err + } + + req := &queueDeclare{ + Queue: name, + Passive: true, + Durable: durable, + AutoDelete: autoDelete, + Exclusive: exclusive, + NoWait: noWait, + Arguments: args, + } + res := &queueDeclareOk{} + + if err := ch.call(req, res); err != nil { + return Queue{}, err + } + + if req.wait() { + return Queue{ + Name: res.Queue, + Messages: int(res.MessageCount), + Consumers: int(res.ConsumerCount), + }, nil + } + + return Queue{Name: name}, nil +} + +/* +QueueInspect passively declares a queue by name to inspect the current message +count and consumer count. + +Use this method to check how many messages ready for delivery reside in the queue, +how many consumers are receiving deliveries, and whether a queue by this +name already exists. 
+ +If the queue by this name exists, use Channel.QueueDeclare check if it is +declared with specific parameters. + +If a queue by this name does not exist, an error will be returned and the +channel will be closed. + +*/ +func (ch *Channel) QueueInspect(name string) (Queue, error) { + req := &queueDeclare{ + Queue: name, + Passive: true, + } + res := &queueDeclareOk{} + + err := ch.call(req, res) + + state := Queue{ + Name: name, + Messages: int(res.MessageCount), + Consumers: int(res.ConsumerCount), + } + + return state, err +} + +/* +QueueBind binds an exchange to a queue so that publishings to the exchange will +be routed to the queue when the publishing routing key matches the binding +routing key. + + QueueBind("pagers", "alert", "log", false, nil) + QueueBind("emails", "info", "log", false, nil) + + Delivery Exchange Key Queue + ----------------------------------------------- + key: alert --> log ----> alert --> pagers + key: info ---> log ----> info ---> emails + key: debug --> log (none) (dropped) + +If a binding with the same key and arguments already exists between the +exchange and queue, the attempt to rebind will be ignored and the existing +binding will be retained. + +In the case that multiple bindings may cause the message to be routed to the +same queue, the server will only route the publishing once. This is possible +with topic exchanges. + + QueueBind("pagers", "alert", "amq.topic", false, nil) + QueueBind("emails", "info", "amq.topic", false, nil) + QueueBind("emails", "#", "amq.topic", false, nil) // match everything + + Delivery Exchange Key Queue + ----------------------------------------------- + key: alert --> amq.topic ----> alert --> pagers + key: info ---> amq.topic ----> # ------> emails + \---> info ---/ + key: debug --> amq.topic ----> # ------> emails + +It is only possible to bind a durable queue to a durable exchange regardless of +whether the queue or exchange is auto-deleted. 
Bindings between durable queues +and exchanges will also be restored on server restart. + +If the binding could not complete, an error will be returned and the channel +will be closed. + +When noWait is false and the queue could not be bound, the channel will be +closed with an error. + +*/ +func (ch *Channel) QueueBind(name, key, exchange string, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &queueBind{ + Queue: name, + Exchange: exchange, + RoutingKey: key, + NoWait: noWait, + Arguments: args, + }, + &queueBindOk{}, + ) +} + +/* +QueueUnbind removes a binding between an exchange and queue matching the key and +arguments. + +It is possible to send and empty string for the exchange name which means to +unbind the queue from the default exchange. + +*/ +func (ch *Channel) QueueUnbind(name, key, exchange string, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &queueUnbind{ + Queue: name, + Exchange: exchange, + RoutingKey: key, + Arguments: args, + }, + &queueUnbindOk{}, + ) +} + +/* +QueuePurge removes all messages from the named queue which are not waiting to +be acknowledged. Messages that have been delivered but have not yet been +acknowledged will not be removed. + +When successful, returns the number of messages purged. + +If noWait is true, do not wait for the server response and the number of +messages purged will not be meaningful. +*/ +func (ch *Channel) QueuePurge(name string, noWait bool) (int, error) { + req := &queuePurge{ + Queue: name, + NoWait: noWait, + } + res := &queuePurgeOk{} + + err := ch.call(req, res) + + return int(res.MessageCount), err +} + +/* +QueueDelete removes the queue from the server including all bindings then +purges the messages based on server configuration, returning the number of +messages purged. + +When ifUnused is true, the queue will not be deleted if there are any +consumers on the queue. 
If there are consumers, an error will be returned and +the channel will be closed. + +When ifEmpty is true, the queue will not be deleted if there are any messages +remaining on the queue. If there are messages, an error will be returned and +the channel will be closed. + +When noWait is true, the queue will be deleted without waiting for a response +from the server. The purged message count will not be meaningful. If the queue +could not be deleted, a channel exception will be raised and the channel will +be closed. + +*/ +func (ch *Channel) QueueDelete(name string, ifUnused, ifEmpty, noWait bool) (int, error) { + req := &queueDelete{ + Queue: name, + IfUnused: ifUnused, + IfEmpty: ifEmpty, + NoWait: noWait, + } + res := &queueDeleteOk{} + + err := ch.call(req, res) + + return int(res.MessageCount), err +} + +/* +Consume immediately starts delivering queued messages. + +Begin receiving on the returned chan Delivery before any other operation on the +Connection or Channel. + +Continues deliveries to the returned chan Delivery until Channel.Cancel, +Connection.Close, Channel.Close, or an AMQP exception occurs. Consumers must +range over the chan to ensure all deliveries are received. Unreceived +deliveries will block all methods on the same connection. + +All deliveries in AMQP must be acknowledged. It is expected of the consumer to +call Delivery.Ack after it has successfully processed the delivery. If the +consumer is cancelled or the channel or connection is closed any unacknowledged +deliveries will be requeued at the end of the same queue. + +The consumer is identified by a string that is unique and scoped for all +consumers on this channel. If you wish to eventually cancel the consumer, use +the same non-empty identifier in Channel.Cancel. An empty string will cause +the library to generate a unique identity. 
The consumer identity will be +included in every Delivery in the ConsumerTag field + +When autoAck (also known as noAck) is true, the server will acknowledge +deliveries to this consumer prior to writing the delivery to the network. When +autoAck is true, the consumer should not call Delivery.Ack. Automatically +acknowledging deliveries means that some deliveries may get lost if the +consumer is unable to process them after the server delivers them. +See http://www.rabbitmq.com/confirms.html for more details. + +When exclusive is true, the server will ensure that this is the sole consumer +from this queue. When exclusive is false, the server will fairly distribute +deliveries across multiple consumers. + +The noLocal flag is not supported by RabbitMQ. + +It's advisable to use separate connections for +Channel.Publish and Channel.Consume so not to have TCP pushback on publishing +affect the ability to consume messages, so this parameter is here mostly for +completeness. + +When noWait is true, do not wait for the server to confirm the request and +immediately begin deliveries. If it is not possible to consume, a channel +exception will be raised and the channel will be closed. + +Optional arguments can be provided that have specific semantics for the queue +or server. + +Inflight messages, limited by Channel.Qos will be buffered until received from +the returned chan. + +When the Channel or Connection is closed, all buffered and inflight messages will +be dropped. + +When the consumer tag is cancelled, all inflight messages will be delivered until +the returned chan is closed. + +*/ +func (ch *Channel) Consume(queue, consumer string, autoAck, exclusive, noLocal, noWait bool, args Table) (<-chan Delivery, error) { + // When we return from ch.call, there may be a delivery already for the + // consumer that hasn't been added to the consumer hash yet. Because of + // this, we never rely on the server picking a consumer tag for us. 
+ + if err := args.Validate(); err != nil { + return nil, err + } + + if consumer == "" { + consumer = uniqueConsumerTag() + } + + req := &basicConsume{ + Queue: queue, + ConsumerTag: consumer, + NoLocal: noLocal, + NoAck: autoAck, + Exclusive: exclusive, + NoWait: noWait, + Arguments: args, + } + res := &basicConsumeOk{} + + deliveries := make(chan Delivery) + + ch.consumers.add(consumer, deliveries) + + if err := ch.call(req, res); err != nil { + ch.consumers.cancel(consumer) + return nil, err + } + + return (<-chan Delivery)(deliveries), nil +} + +/* +ExchangeDeclare declares an exchange on the server. If the exchange does not +already exist, the server will create it. If the exchange exists, the server +verifies that it is of the provided type, durability and auto-delete flags. + +Errors returned from this method will close the channel. + +Exchange names starting with "amq." are reserved for pre-declared and +standardized exchanges. The client MAY declare an exchange starting with +"amq." if the passive option is set, or the exchange already exists. Names can +consist of a non-empty sequence of letters, digits, hyphen, underscore, +period, or colon. + +Each exchange belongs to one of a set of exchange kinds/types implemented by +the server. The exchange types define the functionality of the exchange - i.e. +how messages are routed through it. Once an exchange is declared, its type +cannot be changed. The common types are "direct", "fanout", "topic" and +"headers". + +Durable and Non-Auto-Deleted exchanges will survive server restarts and remain +declared when there are no remaining bindings. This is the best lifetime for +long-lived exchange configurations like stable routes and default exchanges. + +Non-Durable and Auto-Deleted exchanges will be deleted when there are no +remaining bindings and not restored on server restart. 
This lifetime is +useful for temporary topologies that should not pollute the virtual host on +failure or after the consumers have completed. + +Non-Durable and Non-Auto-deleted exchanges will remain as long as the server is +running including when there are no remaining bindings. This is useful for +temporary topologies that may have long delays between bindings. + +Durable and Auto-Deleted exchanges will survive server restarts and will be +removed before and after server restarts when there are no remaining bindings. +These exchanges are useful for robust temporary topologies or when you require +binding durable queues to auto-deleted exchanges. + +Note: RabbitMQ declares the default exchange types like 'amq.fanout' as +durable, so queues that bind to these pre-declared exchanges must also be +durable. + +Exchanges declared as `internal` do not accept accept publishings. Internal +exchanges are useful when you wish to implement inter-exchange topologies +that should not be exposed to users of the broker. + +When noWait is true, declare without waiting for a confirmation from the server. +The channel may be closed as a result of an error. Add a NotifyClose listener +to respond to any exceptions. + +Optional amqp.Table of arguments that are specific to the server's implementation of +the exchange can be sent for exchange types that require extra parameters. +*/ +func (ch *Channel) ExchangeDeclare(name, kind string, durable, autoDelete, internal, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &exchangeDeclare{ + Exchange: name, + Type: kind, + Passive: false, + Durable: durable, + AutoDelete: autoDelete, + Internal: internal, + NoWait: noWait, + Arguments: args, + }, + &exchangeDeclareOk{}, + ) +} + +/* + +ExchangeDeclarePassive is functionally and parametrically equivalent to +ExchangeDeclare, except that it sets the "passive" attribute to true. 
A passive +exchange is assumed by RabbitMQ to already exist, and attempting to connect to a +non-existent exchange will cause RabbitMQ to throw an exception. This function +can be used to detect the existence of an exchange. + +*/ +func (ch *Channel) ExchangeDeclarePassive(name, kind string, durable, autoDelete, internal, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &exchangeDeclare{ + Exchange: name, + Type: kind, + Passive: true, + Durable: durable, + AutoDelete: autoDelete, + Internal: internal, + NoWait: noWait, + Arguments: args, + }, + &exchangeDeclareOk{}, + ) +} + +/* +ExchangeDelete removes the named exchange from the server. When an exchange is +deleted all queue bindings on the exchange are also deleted. If this exchange +does not exist, the channel will be closed with an error. + +When ifUnused is true, the server will only delete the exchange if it has no queue +bindings. If the exchange has queue bindings the server does not delete it +but close the channel with an exception instead. Set this to true if you are +not the sole owner of the exchange. + +When noWait is true, do not wait for a server confirmation that the exchange has +been deleted. Failing to delete the channel could close the channel. Add a +NotifyClose listener to respond to these channel exceptions. +*/ +func (ch *Channel) ExchangeDelete(name string, ifUnused, noWait bool) error { + return ch.call( + &exchangeDelete{ + Exchange: name, + IfUnused: ifUnused, + NoWait: noWait, + }, + &exchangeDeleteOk{}, + ) +} + +/* +ExchangeBind binds an exchange to another exchange to create inter-exchange +routing topologies on the server. This can decouple the private topology and +routing exchanges from exchanges intended solely for publishing endpoints. + +Binding two exchanges with identical arguments will not create duplicate +bindings. + +Binding one exchange to another with multiple bindings will only deliver a +message once. 
For example if you bind your exchange to `amq.fanout` with two +different binding keys, only a single message will be delivered to your +exchange even though multiple bindings will match. + +Given a message delivered to the source exchange, the message will be forwarded +to the destination exchange when the routing key is matched. + + ExchangeBind("sell", "MSFT", "trade", false, nil) + ExchangeBind("buy", "AAPL", "trade", false, nil) + + Delivery Source Key Destination + example exchange exchange + ----------------------------------------------- + key: AAPL --> trade ----> MSFT sell + \---> AAPL --> buy + +When noWait is true, do not wait for the server to confirm the binding. If any +error occurs the channel will be closed. Add a listener to NotifyClose to +handle these errors. + +Optional arguments specific to the exchanges bound can also be specified. +*/ +func (ch *Channel) ExchangeBind(destination, key, source string, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &exchangeBind{ + Destination: destination, + Source: source, + RoutingKey: key, + NoWait: noWait, + Arguments: args, + }, + &exchangeBindOk{}, + ) +} + +/* +ExchangeUnbind unbinds the destination exchange from the source exchange on the +server by removing the routing key between them. This is the inverse of +ExchangeBind. If the binding does not currently exist, an error will be +returned. + +When noWait is true, do not wait for the server to confirm the deletion of the +binding. If any error occurs the channel will be closed. Add a listener to +NotifyClose to handle these errors. + +Optional arguments that are specific to the type of exchanges bound can also be +provided. These must match the same arguments specified in ExchangeBind to +identify the binding. 
+*/ +func (ch *Channel) ExchangeUnbind(destination, key, source string, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &exchangeUnbind{ + Destination: destination, + Source: source, + RoutingKey: key, + NoWait: noWait, + Arguments: args, + }, + &exchangeUnbindOk{}, + ) +} + +/* +Publish sends a Publishing from the client to an exchange on the server. + +When you want a single message to be delivered to a single queue, you can +publish to the default exchange with the routingKey of the queue name. This is +because every declared queue gets an implicit route to the default exchange. + +Since publishings are asynchronous, any undeliverable message will get returned +by the server. Add a listener with Channel.NotifyReturn to handle any +undeliverable message when calling publish with either the mandatory or +immediate parameters as true. + +Publishings can be undeliverable when the mandatory flag is true and no queue is +bound that matches the routing key, or when the immediate flag is true and no +consumer on the matched queue is ready to accept the delivery. + +This can return an error when the channel, connection or socket is closed. The +error or lack of an error does not indicate whether the server has received this +publishing. + +It is possible for publishing to not reach the broker if the underlying socket +is shut down without pending publishing packets being flushed from the kernel +buffers. The easy way of making it probable that all publishings reach the +server is to always call Connection.Close before terminating your publishing +application. The way to ensure that all publishings reach the server is to add +a listener to Channel.NotifyPublish and put the channel in confirm mode with +Channel.Confirm. Publishing delivery tags and their corresponding +confirmations start at 1. Exit when all publishings are confirmed. 
+ +When Publish does not return an error and the channel is in confirm mode, the +internal counter for DeliveryTags with the first confirmation starts at 1. + +*/ +func (ch *Channel) Publish(exchange, key string, mandatory, immediate bool, msg Publishing) error { + if err := msg.Headers.Validate(); err != nil { + return err + } + + ch.m.Lock() + defer ch.m.Unlock() + + if err := ch.send(&basicPublish{ + Exchange: exchange, + RoutingKey: key, + Mandatory: mandatory, + Immediate: immediate, + Body: msg.Body, + Properties: properties{ + Headers: msg.Headers, + ContentType: msg.ContentType, + ContentEncoding: msg.ContentEncoding, + DeliveryMode: msg.DeliveryMode, + Priority: msg.Priority, + CorrelationId: msg.CorrelationId, + ReplyTo: msg.ReplyTo, + Expiration: msg.Expiration, + MessageId: msg.MessageId, + Timestamp: msg.Timestamp, + Type: msg.Type, + UserId: msg.UserId, + AppId: msg.AppId, + }, + }); err != nil { + return err + } + + if ch.confirming { + ch.confirms.Publish() + } + + return nil +} + +/* +Get synchronously receives a single Delivery from the head of a queue from the +server to the client. In almost all cases, using Channel.Consume will be +preferred. + +If there was a delivery waiting on the queue and that delivery was received, the +second return value will be true. If there was no delivery waiting or an error +occurred, the ok bool will be false. + +All deliveries must be acknowledged including those from Channel.Get. Call +Delivery.Ack on the returned delivery when you have fully processed this +delivery. + +When autoAck is true, the server will automatically acknowledge this message so +you don't have to. But if you are unable to fully process this message before +the channel or connection is closed, the message will not get requeued. 
+ +*/ +func (ch *Channel) Get(queue string, autoAck bool) (msg Delivery, ok bool, err error) { + req := &basicGet{Queue: queue, NoAck: autoAck} + res := &basicGetOk{} + empty := &basicGetEmpty{} + + if err := ch.call(req, res, empty); err != nil { + return Delivery{}, false, err + } + + if res.DeliveryTag > 0 { + return *(newDelivery(ch, res)), true, nil + } + + return Delivery{}, false, nil +} + +/* +Tx puts the channel into transaction mode on the server. All publishings and +acknowledgments following this method will be atomically committed or rolled +back for a single queue. Call either Channel.TxCommit or Channel.TxRollback to +leave a this transaction and immediately start a new transaction. + +The atomicity across multiple queues is not defined as queue declarations and +bindings are not included in the transaction. + +The behavior of publishings that are delivered as mandatory or immediate while +the channel is in a transaction is not defined. + +Once a channel has been put into transaction mode, it cannot be taken out of +transaction mode. Use a different channel for non-transactional semantics. + +*/ +func (ch *Channel) Tx() error { + return ch.call( + &txSelect{}, + &txSelectOk{}, + ) +} + +/* +TxCommit atomically commits all publishings and acknowledgments for a single +queue and immediately start a new transaction. + +Calling this method without having called Channel.Tx is an error. + +*/ +func (ch *Channel) TxCommit() error { + return ch.call( + &txCommit{}, + &txCommitOk{}, + ) +} + +/* +TxRollback atomically rolls back all publishings and acknowledgments for a +single queue and immediately start a new transaction. + +Calling this method without having called Channel.Tx is an error. + +*/ +func (ch *Channel) TxRollback() error { + return ch.call( + &txRollback{}, + &txRollbackOk{}, + ) +} + +/* +Flow pauses the delivery of messages to consumers on this channel. 
Channels +are opened with flow control active, to open a channel with paused +deliveries immediately call this method with `false` after calling +Connection.Channel. + +When active is `false`, this method asks the server to temporarily pause deliveries +until called again with active as `true`. + +Channel.Get methods will not be affected by flow control. + +This method is not intended to act as window control. Use Channel.Qos to limit +the number of unacknowledged messages or bytes in flight instead. + +The server may also send us flow methods to throttle our publishings. A well +behaving publishing client should add a listener with Channel.NotifyFlow and +pause its publishings when `false` is sent on that channel. + +Note: RabbitMQ prefers to use TCP push back to control flow for all channels on +a connection, so under high volume scenarios, it's wise to open separate +Connections for publishings and deliveries. + +*/ +func (ch *Channel) Flow(active bool) error { + return ch.call( + &channelFlow{Active: active}, + &channelFlowOk{}, + ) +} + +/* +Confirm puts this channel into confirm mode so that the client can ensure all +publishings have successfully been received by the server. After entering this +mode, the server will send a basic.ack or basic.nack message with the deliver +tag set to a 1 based incremental index corresponding to every publishing +received after the this method returns. + +Add a listener to Channel.NotifyPublish to respond to the Confirmations. If +Channel.NotifyPublish is not called, the Confirmations will be silently +ignored. + +The order of acknowledgments is not bound to the order of deliveries. + +Ack and Nack confirmations will arrive at some point in the future. + +Unroutable mandatory or immediate messages are acknowledged immediately after +any Channel.NotifyReturn listeners have been notified. 
Other messages are +acknowledged when all queues that should have the message routed to them have +either received acknowledgment of delivery or have enqueued the message, +persisting the message if necessary. + +When noWait is true, the client will not wait for a response. A channel +exception could occur if the server does not support this method. + +*/ +func (ch *Channel) Confirm(noWait bool) error { + if err := ch.call( + &confirmSelect{Nowait: noWait}, + &confirmSelectOk{}, + ); err != nil { + return err + } + + ch.confirmM.Lock() + ch.confirming = true + ch.confirmM.Unlock() + + return nil +} + +/* +Recover redelivers all unacknowledged deliveries on this channel. + +When requeue is false, messages will be redelivered to the original consumer. + +When requeue is true, messages will be redelivered to any available consumer, +potentially including the original. + +If the deliveries cannot be recovered, an error will be returned and the channel +will be closed. + +Note: this method is not implemented on RabbitMQ, use Delivery.Nack instead +*/ +func (ch *Channel) Recover(requeue bool) error { + return ch.call( + &basicRecover{Requeue: requeue}, + &basicRecoverOk{}, + ) +} + +/* +Ack acknowledges a delivery by its delivery tag when having been consumed with +Channel.Consume or Channel.Get. + +Ack acknowledges all message received prior to the delivery tag when multiple +is true. + +See also Delivery.Ack +*/ +func (ch *Channel) Ack(tag uint64, multiple bool) error { + ch.m.Lock() + defer ch.m.Unlock() + + return ch.send(&basicAck{ + DeliveryTag: tag, + Multiple: multiple, + }) +} + +/* +Nack negatively acknowledges a delivery by its delivery tag. Prefer this +method to notify the server that you were not able to process this delivery and +it must be redelivered or dropped. 
+ +See also Delivery.Nack +*/ +func (ch *Channel) Nack(tag uint64, multiple bool, requeue bool) error { + ch.m.Lock() + defer ch.m.Unlock() + + return ch.send(&basicNack{ + DeliveryTag: tag, + Multiple: multiple, + Requeue: requeue, + }) +} + +/* +Reject negatively acknowledges a delivery by its delivery tag. Prefer Nack +over Reject when communicating with a RabbitMQ server because you can Nack +multiple messages, reducing the amount of protocol messages to exchange. + +See also Delivery.Reject +*/ +func (ch *Channel) Reject(tag uint64, requeue bool) error { + ch.m.Lock() + defer ch.m.Unlock() + + return ch.send(&basicReject{ + DeliveryTag: tag, + Requeue: requeue, + }) +} diff --git a/vendor/github.com/streadway/amqp/confirms.go b/vendor/github.com/streadway/amqp/confirms.go new file mode 100644 index 0000000..06cbaa7 --- /dev/null +++ b/vendor/github.com/streadway/amqp/confirms.go @@ -0,0 +1,94 @@ +package amqp + +import "sync" + +// confirms resequences and notifies one or multiple publisher confirmation listeners +type confirms struct { + m sync.Mutex + listeners []chan Confirmation + sequencer map[uint64]Confirmation + published uint64 + expecting uint64 +} + +// newConfirms allocates a confirms +func newConfirms() *confirms { + return &confirms{ + sequencer: map[uint64]Confirmation{}, + published: 0, + expecting: 1, + } +} + +func (c *confirms) Listen(l chan Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + c.listeners = append(c.listeners, l) +} + +// publish increments the publishing counter +func (c *confirms) Publish() uint64 { + c.m.Lock() + defer c.m.Unlock() + + c.published++ + return c.published +} + +// confirm confirms one publishing, increments the expecting delivery tag, and +// removes bookkeeping for that delivery tag. 
+func (c *confirms) confirm(confirmation Confirmation) { + delete(c.sequencer, c.expecting) + c.expecting++ + for _, l := range c.listeners { + l <- confirmation + } +} + +// resequence confirms any out of order delivered confirmations +func (c *confirms) resequence() { + for c.expecting <= c.published { + sequenced, found := c.sequencer[c.expecting] + if !found { + return + } + c.confirm(sequenced) + } +} + +// one confirms one publishing and all following in the publishing sequence +func (c *confirms) One(confirmed Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + if c.expecting == confirmed.DeliveryTag { + c.confirm(confirmed) + } else { + c.sequencer[confirmed.DeliveryTag] = confirmed + } + c.resequence() +} + +// multiple confirms all publishings up until the delivery tag +func (c *confirms) Multiple(confirmed Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + for c.expecting <= confirmed.DeliveryTag { + c.confirm(Confirmation{c.expecting, confirmed.Ack}) + } + c.resequence() +} + +// Close closes all listeners, discarding any out of sequence confirmations +func (c *confirms) Close() error { + c.m.Lock() + defer c.m.Unlock() + + for _, l := range c.listeners { + close(l) + } + c.listeners = nil + return nil +} diff --git a/vendor/github.com/streadway/amqp/connection.go b/vendor/github.com/streadway/amqp/connection.go new file mode 100644 index 0000000..b9d8e8e --- /dev/null +++ b/vendor/github.com/streadway/amqp/connection.go @@ -0,0 +1,847 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bufio" + "crypto/tls" + "io" + "net" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +const ( + maxChannelMax = (2 << 15) - 1 + + defaultHeartbeat = 10 * time.Second + defaultConnectionTimeout = 30 * time.Second + defaultProduct = "https://github.com/streadway/amqp" + defaultVersion = "β" + // Safer default that makes channel leaks a lot easier to spot + // before they create operational headaches. See https://github.com/rabbitmq/rabbitmq-server/issues/1593. + defaultChannelMax = (2 << 10) - 1 + defaultLocale = "en_US" +) + +// Config is used in DialConfig and Open to specify the desired tuning +// parameters used during a connection open handshake. The negotiated tuning +// will be stored in the returned connection's Config field. +type Config struct { + // The SASL mechanisms to try in the client request, and the successful + // mechanism used on the Connection object. + // If SASL is nil, PlainAuth from the URL is used. + SASL []Authentication + + // Vhost specifies the namespace of permissions, exchanges, queues and + // bindings on the server. Dial sets this to the path parsed from the URL. + Vhost string + + ChannelMax int // 0 max channels means 2^16 - 1 + FrameSize int // 0 max bytes means unlimited + Heartbeat time.Duration // less than 1s uses the server's interval + + // TLSClientConfig specifies the client configuration of the TLS connection + // when establishing a tls transport. + // If the URL uses an amqps scheme, then an empty tls.Config with the + // ServerName from the URL is used. + TLSClientConfig *tls.Config + + // Properties is table of properties that the client advertises to the server. + // This is an optional setting - if the application does not set this, + // the underlying library will use a generic set of client properties. 
+ Properties Table + + // Connection locale that we expect to always be en_US + // Even though servers must return it as per the AMQP 0-9-1 spec, + // we are not aware of it being used other than to satisfy the spec requirements + Locale string + + // Dial returns a net.Conn prepared for a TLS handshake with TSLClientConfig, + // then an AMQP connection handshake. + // If Dial is nil, net.DialTimeout with a 30s connection and 30s deadline is + // used during TLS and AMQP handshaking. + Dial func(network, addr string) (net.Conn, error) +} + +// Connection manages the serialization and deserialization of frames from IO +// and dispatches the frames to the appropriate channel. All RPC methods and +// asynchronous Publishing, Delivery, Ack, Nack and Return messages are +// multiplexed on this channel. There must always be active receivers for +// every asynchronous message on this connection. +type Connection struct { + destructor sync.Once // shutdown once + sendM sync.Mutex // conn writer mutex + m sync.Mutex // struct field mutex + + conn io.ReadWriteCloser + + rpc chan message + writer *writer + sends chan time.Time // timestamps of each frame sent + deadlines chan readDeadliner // heartbeater updates read deadlines + + allocator *allocator // id generator valid after openTune + channels map[uint16]*Channel + + noNotify bool // true when we will never notify again + closes []chan *Error + blocks []chan Blocking + + errors chan *Error + + Config Config // The negotiated Config after connection.open + + Major int // Server's major version + Minor int // Server's minor version + Properties Table // Server properties + Locales []string // Server locales + + closed int32 // Will be 1 if the connection is closed, 0 otherwise. 
Should only be accessed as atomic +} + +type readDeadliner interface { + SetReadDeadline(time.Time) error +} + +// DefaultDial establishes a connection when config.Dial is not provided +func DefaultDial(connectionTimeout time.Duration) func(network, addr string) (net.Conn, error) { + return func(network, addr string) (net.Conn, error) { + conn, err := net.DialTimeout(network, addr, connectionTimeout) + if err != nil { + return nil, err + } + + // Heartbeating hasn't started yet, don't stall forever on a dead server. + // A deadline is set for TLS and AMQP handshaking. After AMQP is established, + // the deadline is cleared in openComplete. + if err := conn.SetDeadline(time.Now().Add(connectionTimeout)); err != nil { + return nil, err + } + + return conn, nil + } +} + +// Dial accepts a string in the AMQP URI format and returns a new Connection +// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10 +// seconds and sets the handshake deadline to 30 seconds. After handshake, +// deadlines are cleared. +// +// Dial uses the zero value of tls.Config when it encounters an amqps:// +// scheme. It is equivalent to calling DialTLS(amqp, nil). +func Dial(url string) (*Connection, error) { + return DialConfig(url, Config{ + Heartbeat: defaultHeartbeat, + Locale: defaultLocale, + }) +} + +// DialTLS accepts a string in the AMQP URI format and returns a new Connection +// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10 +// seconds and sets the initial read deadline to 30 seconds. +// +// DialTLS uses the provided tls.Config when encountering an amqps:// scheme. +func DialTLS(url string, amqps *tls.Config) (*Connection, error) { + return DialConfig(url, Config{ + Heartbeat: defaultHeartbeat, + TLSClientConfig: amqps, + Locale: defaultLocale, + }) +} + +// DialConfig accepts a string in the AMQP URI format and a configuration for +// the transport and connection setup, returning a new Connection. 
Defaults to +// a server heartbeat interval of 10 seconds and sets the initial read deadline +// to 30 seconds. +func DialConfig(url string, config Config) (*Connection, error) { + var err error + var conn net.Conn + + uri, err := ParseURI(url) + if err != nil { + return nil, err + } + + if config.SASL == nil { + config.SASL = []Authentication{uri.PlainAuth()} + } + + if config.Vhost == "" { + config.Vhost = uri.Vhost + } + + addr := net.JoinHostPort(uri.Host, strconv.FormatInt(int64(uri.Port), 10)) + + dialer := config.Dial + if dialer == nil { + dialer = DefaultDial(defaultConnectionTimeout) + } + + conn, err = dialer("tcp", addr) + if err != nil { + return nil, err + } + + if uri.Scheme == "amqps" { + if config.TLSClientConfig == nil { + config.TLSClientConfig = new(tls.Config) + } + + // If ServerName has not been specified in TLSClientConfig, + // set it to the URI host used for this connection. + if config.TLSClientConfig.ServerName == "" { + config.TLSClientConfig.ServerName = uri.Host + } + + client := tls.Client(conn, config.TLSClientConfig) + if err := client.Handshake(); err != nil { + + conn.Close() + return nil, err + } + + conn = client + } + + return Open(conn, config) +} + +/* +Open accepts an already established connection, or other io.ReadWriteCloser as +a transport. Use this method if you have established a TLS connection or wish +to use your own custom transport. + +*/ +func Open(conn io.ReadWriteCloser, config Config) (*Connection, error) { + c := &Connection{ + conn: conn, + writer: &writer{bufio.NewWriter(conn)}, + channels: make(map[uint16]*Channel), + rpc: make(chan message), + sends: make(chan time.Time), + errors: make(chan *Error, 1), + deadlines: make(chan readDeadliner, 1), + } + go c.reader(conn) + return c, c.open(config) +} + +/* +LocalAddr returns the local TCP peer address, or ":0" (the zero value of net.TCPAddr) +as a fallback default value if the underlying transport does not support LocalAddr(). 
+*/ +func (c *Connection) LocalAddr() net.Addr { + if conn, ok := c.conn.(interface { + LocalAddr() net.Addr + }); ok { + return conn.LocalAddr() + } + return &net.TCPAddr{} +} + +// ConnectionState returns basic TLS details of the underlying transport. +// Returns a zero value when the underlying connection does not implement +// ConnectionState() tls.ConnectionState. +func (c *Connection) ConnectionState() tls.ConnectionState { + if conn, ok := c.conn.(interface { + ConnectionState() tls.ConnectionState + }); ok { + return conn.ConnectionState() + } + return tls.ConnectionState{} +} + +/* +NotifyClose registers a listener for close events either initiated by an error +accompanying a connection.close method or by a normal shutdown. + +On normal shutdowns, the chan will be closed. + +To reconnect after a transport or protocol error, register a listener here and +re-run your setup process. + +*/ +func (c *Connection) NotifyClose(receiver chan *Error) chan *Error { + c.m.Lock() + defer c.m.Unlock() + + if c.noNotify { + close(receiver) + } else { + c.closes = append(c.closes, receiver) + } + + return receiver +} + +/* +NotifyBlocked registers a listener for RabbitMQ specific TCP flow control +method extensions connection.blocked and connection.unblocked. Flow control is +active with a reason when Blocking.Blocked is true. When a Connection is +blocked, all methods will block across all connections until server resources +become free again. + +This optional extension is supported by the server when the +"connection.blocked" server capability key is true. + +*/ +func (c *Connection) NotifyBlocked(receiver chan Blocking) chan Blocking { + c.m.Lock() + defer c.m.Unlock() + + if c.noNotify { + close(receiver) + } else { + c.blocks = append(c.blocks, receiver) + } + + return receiver +} + +/* +Close requests and waits for the response to close the AMQP connection. 
+ +It's advisable to use this message when publishing to ensure all kernel buffers +have been flushed on the server and client before exiting. + +An error indicates that server may not have received this request to close but +the connection should be treated as closed regardless. + +After returning from this call, all resources associated with this connection, +including the underlying io, Channels, Notify listeners and Channel consumers +will also be closed. +*/ +func (c *Connection) Close() error { + if c.IsClosed() { + return ErrClosed + } + + defer c.shutdown(nil) + return c.call( + &connectionClose{ + ReplyCode: replySuccess, + ReplyText: "kthxbai", + }, + &connectionCloseOk{}, + ) +} + +func (c *Connection) closeWith(err *Error) error { + if c.IsClosed() { + return ErrClosed + } + + defer c.shutdown(err) + return c.call( + &connectionClose{ + ReplyCode: uint16(err.Code), + ReplyText: err.Reason, + }, + &connectionCloseOk{}, + ) +} + +// IsClosed returns true if the connection is marked as closed, otherwise false +// is returned. 
+func (c *Connection) IsClosed() bool { + return (atomic.LoadInt32(&c.closed) == 1) +} + +func (c *Connection) send(f frame) error { + if c.IsClosed() { + return ErrClosed + } + + c.sendM.Lock() + err := c.writer.WriteFrame(f) + c.sendM.Unlock() + + if err != nil { + // shutdown could be re-entrant from signaling notify chans + go c.shutdown(&Error{ + Code: FrameError, + Reason: err.Error(), + }) + } else { + // Broadcast we sent a frame, reducing heartbeats, only + // if there is something that can receive - like a non-reentrant + // call or if the heartbeater isn't running + select { + case c.sends <- time.Now(): + default: + } + } + + return err +} + +func (c *Connection) shutdown(err *Error) { + atomic.StoreInt32(&c.closed, 1) + + c.destructor.Do(func() { + c.m.Lock() + defer c.m.Unlock() + + if err != nil { + for _, c := range c.closes { + c <- err + } + } + + if err != nil { + c.errors <- err + } + // Shutdown handler goroutine can still receive the result. + close(c.errors) + + for _, c := range c.closes { + close(c) + } + + for _, c := range c.blocks { + close(c) + } + + // Shutdown the channel, but do not use closeChannel() as it calls + // releaseChannel() which requires the connection lock. + // + // Ranging over c.channels and calling releaseChannel() that mutates + // c.channels is racy - see commit 6063341 for an example. 
+ for _, ch := range c.channels { + ch.shutdown(err) + } + + c.conn.Close() + + c.channels = map[uint16]*Channel{} + c.allocator = newAllocator(1, c.Config.ChannelMax) + c.noNotify = true + }) +} + +// All methods sent to the connection channel should be synchronous so we +// can handle them directly without a framing component +func (c *Connection) demux(f frame) { + if f.channel() == 0 { + c.dispatch0(f) + } else { + c.dispatchN(f) + } +} + +func (c *Connection) dispatch0(f frame) { + switch mf := f.(type) { + case *methodFrame: + switch m := mf.Method.(type) { + case *connectionClose: + // Send immediately as shutdown will close our side of the writer. + c.send(&methodFrame{ + ChannelId: 0, + Method: &connectionCloseOk{}, + }) + + c.shutdown(newError(m.ReplyCode, m.ReplyText)) + case *connectionBlocked: + for _, c := range c.blocks { + c <- Blocking{Active: true, Reason: m.Reason} + } + case *connectionUnblocked: + for _, c := range c.blocks { + c <- Blocking{Active: false} + } + default: + c.rpc <- m + } + case *heartbeatFrame: + // kthx - all reads reset our deadline. so we can drop this + default: + // lolwat - channel0 only responds to methods and heartbeats + c.closeWith(ErrUnexpectedFrame) + } +} + +func (c *Connection) dispatchN(f frame) { + c.m.Lock() + channel := c.channels[f.channel()] + c.m.Unlock() + + if channel != nil { + channel.recv(channel, f) + } else { + c.dispatchClosed(f) + } +} + +// section 2.3.7: "When a peer decides to close a channel or connection, it +// sends a Close method. The receiving peer MUST respond to a Close with a +// Close-Ok, and then both parties can close their channel or connection. Note +// that if peers ignore Close, deadlock can happen when both peers send Close +// at the same time." +// +// When we don't have a channel, so we must respond with close-ok on a close +// method. This can happen between a channel exception on an asynchronous +// method like basic.publish and a synchronous close with channel.close. 
+// In that case, we'll get both a channel.close and channel.close-ok in any +// order. +func (c *Connection) dispatchClosed(f frame) { + // Only consider method frames, drop content/header frames + if mf, ok := f.(*methodFrame); ok { + switch mf.Method.(type) { + case *channelClose: + c.send(&methodFrame{ + ChannelId: f.channel(), + Method: &channelCloseOk{}, + }) + case *channelCloseOk: + // we are already closed, so do nothing + default: + // unexpected method on closed channel + c.closeWith(ErrClosed) + } + } +} + +// Reads each frame off the IO and hand off to the connection object that +// will demux the streams and dispatch to one of the opened channels or +// handle on channel 0 (the connection channel). +func (c *Connection) reader(r io.Reader) { + buf := bufio.NewReader(r) + frames := &reader{buf} + conn, haveDeadliner := r.(readDeadliner) + + for { + frame, err := frames.ReadFrame() + + if err != nil { + c.shutdown(&Error{Code: FrameError, Reason: err.Error()}) + return + } + + c.demux(frame) + + if haveDeadliner { + c.deadlines <- conn + } + } +} + +// Ensures that at least one frame is being sent at the tuned interval with a +// jitter tolerance of 1s +func (c *Connection) heartbeater(interval time.Duration, done chan *Error) { + const maxServerHeartbeatsInFlight = 3 + + var sendTicks <-chan time.Time + if interval > 0 { + ticker := time.NewTicker(interval) + defer ticker.Stop() + sendTicks = ticker.C + } + + lastSent := time.Now() + + for { + select { + case at, stillSending := <-c.sends: + // When actively sending, depend on sent frames to reset server timer + if stillSending { + lastSent = at + } else { + return + } + + case at := <-sendTicks: + // When idle, fill the space with a heartbeat frame + if at.Sub(lastSent) > interval-time.Second { + if err := c.send(&heartbeatFrame{}); err != nil { + // send heartbeats even after close/closeOk so we + // tick until the connection starts erroring + return + } + } + + case conn := <-c.deadlines: + // When 
reading, reset our side of the deadline, if we've negotiated one with + // a deadline that covers at least 2 server heartbeats + if interval > 0 { + conn.SetReadDeadline(time.Now().Add(maxServerHeartbeatsInFlight * interval)) + } + + case <-done: + return + } + } +} + +// Convenience method to inspect the Connection.Properties["capabilities"] +// Table for server identified capabilities like "basic.ack" or +// "confirm.select". +func (c *Connection) isCapable(featureName string) bool { + capabilities, _ := c.Properties["capabilities"].(Table) + hasFeature, _ := capabilities[featureName].(bool) + return hasFeature +} + +// allocateChannel records but does not open a new channel with a unique id. +// This method is the initial part of the channel lifecycle and paired with +// releaseChannel +func (c *Connection) allocateChannel() (*Channel, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.IsClosed() { + return nil, ErrClosed + } + + id, ok := c.allocator.next() + if !ok { + return nil, ErrChannelMax + } + + ch := newChannel(c, uint16(id)) + c.channels[uint16(id)] = ch + + return ch, nil +} + +// releaseChannel removes a channel from the registry as the final part of the +// channel lifecycle +func (c *Connection) releaseChannel(id uint16) { + c.m.Lock() + defer c.m.Unlock() + + delete(c.channels, id) + c.allocator.release(int(id)) +} + +// openChannel allocates and opens a channel, must be paired with closeChannel +func (c *Connection) openChannel() (*Channel, error) { + ch, err := c.allocateChannel() + if err != nil { + return nil, err + } + + if err := ch.open(); err != nil { + c.releaseChannel(ch.id) + return nil, err + } + return ch, nil +} + +// closeChannel releases and initiates a shutdown of the channel. All channel +// closures should be initiated here for proper channel lifecycle management on +// this connection. 
+func (c *Connection) closeChannel(ch *Channel, e *Error) { + ch.shutdown(e) + c.releaseChannel(ch.id) +} + +/* +Channel opens a unique, concurrent server channel to process the bulk of AMQP +messages. Any error from methods on this receiver will render the receiver +invalid and a new Channel should be opened. + +*/ +func (c *Connection) Channel() (*Channel, error) { + return c.openChannel() +} + +func (c *Connection) call(req message, res ...message) error { + // Special case for when the protocol header frame is sent insted of a + // request method + if req != nil { + if err := c.send(&methodFrame{ChannelId: 0, Method: req}); err != nil { + return err + } + } + + select { + case err, ok := <-c.errors: + if !ok { + return ErrClosed + } + return err + + case msg := <-c.rpc: + // Try to match one of the result types + for _, try := range res { + if reflect.TypeOf(msg) == reflect.TypeOf(try) { + // *res = *msg + vres := reflect.ValueOf(try).Elem() + vmsg := reflect.ValueOf(msg).Elem() + vres.Set(vmsg) + return nil + } + } + return ErrCommandInvalid + } + // unreachable +} + +// Connection = open-Connection *use-Connection close-Connection +// open-Connection = C:protocol-header +// S:START C:START-OK +// *challenge +// S:TUNE C:TUNE-OK +// C:OPEN S:OPEN-OK +// challenge = S:SECURE C:SECURE-OK +// use-Connection = *channel +// close-Connection = C:CLOSE S:CLOSE-OK +// / S:CLOSE C:CLOSE-OK +func (c *Connection) open(config Config) error { + if err := c.send(&protocolHeader{}); err != nil { + return err + } + + return c.openStart(config) +} + +func (c *Connection) openStart(config Config) error { + start := &connectionStart{} + + if err := c.call(nil, start); err != nil { + return err + } + + c.Major = int(start.VersionMajor) + c.Minor = int(start.VersionMinor) + c.Properties = Table(start.ServerProperties) + c.Locales = strings.Split(start.Locales, " ") + + // eventually support challenge/response here by also responding to + // connectionSecure. 
+ auth, ok := pickSASLMechanism(config.SASL, strings.Split(start.Mechanisms, " ")) + if !ok { + return ErrSASL + } + + // Save this mechanism off as the one we chose + c.Config.SASL = []Authentication{auth} + + // Set the connection locale to client locale + c.Config.Locale = config.Locale + + return c.openTune(config, auth) +} + +func (c *Connection) openTune(config Config, auth Authentication) error { + if len(config.Properties) == 0 { + config.Properties = Table{ + "product": defaultProduct, + "version": defaultVersion, + } + } + + config.Properties["capabilities"] = Table{ + "connection.blocked": true, + "consumer_cancel_notify": true, + } + + ok := &connectionStartOk{ + ClientProperties: config.Properties, + Mechanism: auth.Mechanism(), + Response: auth.Response(), + Locale: config.Locale, + } + tune := &connectionTune{} + + if err := c.call(ok, tune); err != nil { + // per spec, a connection can only be closed when it has been opened + // so at this point, we know it's an auth error, but the socket + // was closed instead. Return a meaningful error. + return ErrCredentials + } + + // When the server and client both use default 0, then the max channel is + // only limited by uint16. + c.Config.ChannelMax = pick(config.ChannelMax, int(tune.ChannelMax)) + if c.Config.ChannelMax == 0 { + c.Config.ChannelMax = defaultChannelMax + } + c.Config.ChannelMax = min(c.Config.ChannelMax, maxChannelMax) + + // Frame size includes headers and end byte (len(payload)+8), even if + // this is less than FrameMinSize, use what the server sends because the + // alternative is to stop the handshake here. 
+ c.Config.FrameSize = pick(config.FrameSize, int(tune.FrameMax)) + + // Save this off for resetDeadline() + c.Config.Heartbeat = time.Second * time.Duration(pick( + int(config.Heartbeat/time.Second), + int(tune.Heartbeat))) + + // "The client should start sending heartbeats after receiving a + // Connection.Tune method" + go c.heartbeater(c.Config.Heartbeat, c.NotifyClose(make(chan *Error, 1))) + + if err := c.send(&methodFrame{ + ChannelId: 0, + Method: &connectionTuneOk{ + ChannelMax: uint16(c.Config.ChannelMax), + FrameMax: uint32(c.Config.FrameSize), + Heartbeat: uint16(c.Config.Heartbeat / time.Second), + }, + }); err != nil { + return err + } + + return c.openVhost(config) +} + +func (c *Connection) openVhost(config Config) error { + req := &connectionOpen{VirtualHost: config.Vhost} + res := &connectionOpenOk{} + + if err := c.call(req, res); err != nil { + // Cannot be closed yet, but we know it's a vhost problem + return ErrVhost + } + + c.Config.Vhost = config.Vhost + + return c.openComplete() +} + +// openComplete performs any final Connection initialization dependent on the +// connection handshake and clears any state needed for TLS and AMQP handshaking. +func (c *Connection) openComplete() error { + // We clear the deadlines and let the heartbeater reset the read deadline if requested. + // RabbitMQ uses TCP flow control at this point for pushback so Writes can + // intentionally block. 
+ if deadliner, ok := c.conn.(interface { + SetDeadline(time.Time) error + }); ok { + _ = deadliner.SetDeadline(time.Time{}) + } + + c.allocator = newAllocator(1, c.Config.ChannelMax) + return nil +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func pick(client, server int) int { + if client == 0 || server == 0 { + return max(client, server) + } + return min(client, server) +} diff --git a/vendor/github.com/streadway/amqp/consumers.go b/vendor/github.com/streadway/amqp/consumers.go new file mode 100644 index 0000000..887ac74 --- /dev/null +++ b/vendor/github.com/streadway/amqp/consumers.go @@ -0,0 +1,142 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "os" + "strconv" + "sync" + "sync/atomic" +) + +var consumerSeq uint64 + +const consumerTagLengthMax = 0xFF // see writeShortstr + +func uniqueConsumerTag() string { + return commandNameBasedUniqueConsumerTag(os.Args[0]) +} + +func commandNameBasedUniqueConsumerTag(commandName string) string { + tagPrefix := "ctag-" + tagInfix := commandName + tagSuffix := "-" + strconv.FormatUint(atomic.AddUint64(&consumerSeq, 1), 10) + + if len(tagPrefix)+len(tagInfix)+len(tagSuffix) > consumerTagLengthMax { + tagInfix = "streadway/amqp" + } + + return tagPrefix + tagInfix + tagSuffix +} + +type consumerBuffers map[string]chan *Delivery + +// Concurrent type that manages the consumerTag -> +// ingress consumerBuffer mapping +type consumers struct { + sync.WaitGroup // one for buffer + closed chan struct{} // signal buffer + + sync.Mutex // protects below + chans consumerBuffers +} + +func makeConsumers() *consumers { + return &consumers{ + closed: make(chan struct{}), + chans: make(consumerBuffers), + } +} + +func (subs 
*consumers) buffer(in chan *Delivery, out chan Delivery) { + defer close(out) + defer subs.Done() + + var inflight = in + var queue []*Delivery + + for delivery := range in { + queue = append(queue, delivery) + + for len(queue) > 0 { + select { + case <-subs.closed: + // closed before drained, drop in-flight + return + + case delivery, consuming := <-inflight: + if consuming { + queue = append(queue, delivery) + } else { + inflight = nil + } + + case out <- *queue[0]: + queue = queue[1:] + } + } + } +} + +// On key conflict, close the previous channel. +func (subs *consumers) add(tag string, consumer chan Delivery) { + subs.Lock() + defer subs.Unlock() + + if prev, found := subs.chans[tag]; found { + close(prev) + } + + in := make(chan *Delivery) + subs.chans[tag] = in + + subs.Add(1) + go subs.buffer(in, consumer) +} + +func (subs *consumers) cancel(tag string) (found bool) { + subs.Lock() + defer subs.Unlock() + + ch, found := subs.chans[tag] + + if found { + delete(subs.chans, tag) + close(ch) + } + + return found +} + +func (subs *consumers) close() { + subs.Lock() + defer subs.Unlock() + + close(subs.closed) + + for tag, ch := range subs.chans { + delete(subs.chans, tag) + close(ch) + } + + subs.Wait() +} + +// Sends a delivery to a the consumer identified by `tag`. +// If unbuffered channels are used for Consume this method +// could block all deliveries until the consumer +// receives on the other end of the channel. +func (subs *consumers) send(tag string, msg *Delivery) bool { + subs.Lock() + defer subs.Unlock() + + buffer, found := subs.chans[tag] + if found { + buffer <- msg + } + + return found +} diff --git a/vendor/github.com/streadway/amqp/delivery.go b/vendor/github.com/streadway/amqp/delivery.go new file mode 100644 index 0000000..7241264 --- /dev/null +++ b/vendor/github.com/streadway/amqp/delivery.go @@ -0,0 +1,173 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "errors" + "time" +) + +var errDeliveryNotInitialized = errors.New("delivery not initialized") + +// Acknowledger notifies the server of successful or failed consumption of +// delivieries via identifier found in the Delivery.DeliveryTag field. +// +// Applications can provide mock implementations in tests of Delivery handlers. +type Acknowledger interface { + Ack(tag uint64, multiple bool) error + Nack(tag uint64, multiple bool, requeue bool) error + Reject(tag uint64, requeue bool) error +} + +// Delivery captures the fields for a previously delivered message resident in +// a queue to be delivered by the server to a consumer from Channel.Consume or +// Channel.Get. +type Delivery struct { + Acknowledger Acknowledger // the channel from which this delivery arrived + + Headers Table // Application or header exchange table + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + DeliveryMode uint8 // queue implementation use - non-persistent (1) or persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user - should be authenticated user + AppId string // application use - creating application id + + // Valid only with Channel.Consume + ConsumerTag string + + // Valid only with Channel.Get + MessageCount uint32 + + DeliveryTag uint64 + Redelivered bool + Exchange string // 
basic.publish exchange + RoutingKey string // basic.publish routing key + + Body []byte +} + +func newDelivery(channel *Channel, msg messageWithContent) *Delivery { + props, body := msg.getContent() + + delivery := Delivery{ + Acknowledger: channel, + + Headers: props.Headers, + ContentType: props.ContentType, + ContentEncoding: props.ContentEncoding, + DeliveryMode: props.DeliveryMode, + Priority: props.Priority, + CorrelationId: props.CorrelationId, + ReplyTo: props.ReplyTo, + Expiration: props.Expiration, + MessageId: props.MessageId, + Timestamp: props.Timestamp, + Type: props.Type, + UserId: props.UserId, + AppId: props.AppId, + + Body: body, + } + + // Properties for the delivery types + switch m := msg.(type) { + case *basicDeliver: + delivery.ConsumerTag = m.ConsumerTag + delivery.DeliveryTag = m.DeliveryTag + delivery.Redelivered = m.Redelivered + delivery.Exchange = m.Exchange + delivery.RoutingKey = m.RoutingKey + + case *basicGetOk: + delivery.MessageCount = m.MessageCount + delivery.DeliveryTag = m.DeliveryTag + delivery.Redelivered = m.Redelivered + delivery.Exchange = m.Exchange + delivery.RoutingKey = m.RoutingKey + } + + return &delivery +} + +/* +Ack delegates an acknowledgement through the Acknowledger interface that the +client or server has finished work on a delivery. + +All deliveries in AMQP must be acknowledged. If you called Channel.Consume +with autoAck true then the server will be automatically ack each message and +this method should not be called. Otherwise, you must call Delivery.Ack after +you have successfully processed this delivery. + +When multiple is true, this delivery and all prior unacknowledged deliveries +on the same channel will be acknowledged. This is useful for batch processing +of deliveries. + +An error will indicate that the acknowledge could not be delivered to the +channel it was sent from. 
+ +Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every +delivery that is not automatically acknowledged. +*/ +func (d Delivery) Ack(multiple bool) error { + if d.Acknowledger == nil { + return errDeliveryNotInitialized + } + return d.Acknowledger.Ack(d.DeliveryTag, multiple) +} + +/* +Reject delegates a negatively acknowledgement through the Acknowledger interface. + +When requeue is true, queue this message to be delivered to a consumer on a +different channel. When requeue is false or the server is unable to queue this +message, it will be dropped. + +If you are batch processing deliveries, and your server supports it, prefer +Delivery.Nack. + +Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every +delivery that is not automatically acknowledged. +*/ +func (d Delivery) Reject(requeue bool) error { + if d.Acknowledger == nil { + return errDeliveryNotInitialized + } + return d.Acknowledger.Reject(d.DeliveryTag, requeue) +} + +/* +Nack negatively acknowledge the delivery of message(s) identified by the +delivery tag from either the client or server. + +When multiple is true, nack messages up to and including delivered messages up +until the delivery tag delivered on the same channel. + +When requeue is true, request the server to deliver this message to a different +consumer. If it is not possible or requeue is false, the message will be +dropped or delivered to a server configured dead-letter queue. + +This method must not be used to select or requeue messages the client wishes +not to handle, rather it is to inform the server that the client is incapable +of handling this message at this time. + +Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every +delivery that is not automatically acknowledged. 
+*/ +func (d Delivery) Nack(multiple, requeue bool) error { + if d.Acknowledger == nil { + return errDeliveryNotInitialized + } + return d.Acknowledger.Nack(d.DeliveryTag, multiple, requeue) +} diff --git a/vendor/github.com/streadway/amqp/doc.go b/vendor/github.com/streadway/amqp/doc.go new file mode 100644 index 0000000..ee69c5b --- /dev/null +++ b/vendor/github.com/streadway/amqp/doc.go @@ -0,0 +1,108 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +/* +Package amqp is an AMQP 0.9.1 client with RabbitMQ extensions + +Understand the AMQP 0.9.1 messaging model by reviewing these links first. Much +of the terminology in this library directly relates to AMQP concepts. + + Resources + + http://www.rabbitmq.com/tutorials/amqp-concepts.html + http://www.rabbitmq.com/getstarted.html + http://www.rabbitmq.com/amqp-0-9-1-reference.html + +Design + +Most other broker clients publish to queues, but in AMQP, clients publish +Exchanges instead. AMQP is programmable, meaning that both the producers and +consumers agree on the configuration of the broker, instead of requiring an +operator or system configuration that declares the logical topology in the +broker. The routing between producers and consumer queues is via Bindings. +These bindings form the logical topology of the broker. + +In this library, a message sent from publisher is called a "Publishing" and a +message received to a consumer is called a "Delivery". The fields of +Publishings and Deliveries are close but not exact mappings to the underlying +wire format to maintain stronger types. Many other libraries will combine +message properties with message headers. In this library, the message well +known properties are strongly typed fields on the Publishings and Deliveries, +whereas the user defined headers are in the Headers field. 
+ +The method naming closely matches the protocol's method name with positional +parameters mapping to named protocol message fields. The motivation here is to +present a comprehensive view over all possible interactions with the server. + +Generally, methods that map to protocol methods of the "basic" class will be +elided in this interface, and "select" methods of various channel mode selectors +will be elided for example Channel.Confirm and Channel.Tx. + +The library is intentionally designed to be synchronous, where responses for +each protocol message are required to be received in an RPC manner. Some +methods have a noWait parameter like Channel.QueueDeclare, and some methods are +asynchronous like Channel.Publish. The error values should still be checked for +these methods as they will indicate IO failures like when the underlying +connection closes. + +Asynchronous Events + +Clients of this library may be interested in receiving some of the protocol +messages other than Deliveries like basic.ack methods while a channel is in +confirm mode. + +The Notify* methods with Connection and Channel receivers model the pattern of +asynchronous events like closes due to exceptions, or messages that are sent out +of band from an RPC call like basic.ack or basic.flow. + +Any asynchronous events, including Deliveries and Publishings must always have +a receiver until the corresponding chans are closed. Without asynchronous +receivers, the sychronous methods will block. + +Use Case + +It's important as a client to an AMQP topology to ensure the state of the +broker matches your expectations. For both publish and consume use cases, +make sure you declare the queues, exchanges and bindings you expect to exist +prior to calling Channel.Publish or Channel.Consume. + + // Connections start with amqp.Dial() typically from a command line argument + // or environment variable. 
+ connection, err := amqp.Dial(os.Getenv("AMQP_URL")) + + // To cleanly shutdown by flushing kernel buffers, make sure to close and + // wait for the response. + defer connection.Close() + + // Most operations happen on a channel. If any error is returned on a + // channel, the channel will no longer be valid, throw it away and try with + // a different channel. If you use many channels, it's useful for the + // server to + channel, err := connection.Channel() + + // Declare your topology here, if it doesn't exist, it will be created, if + // it existed already and is not what you expect, then that's considered an + // error. + + // Use your connection on this topology with either Publish or Consume, or + // inspect your queues with QueueInspect. It's unwise to mix Publish and + // Consume to let TCP do its job well. + +SSL/TLS - Secure connections + +When Dial encounters an amqps:// scheme, it will use the zero value of a +tls.Config. This will only perform server certificate and host verification. + +Use DialTLS when you wish to provide a client certificate (recommended), +include a private certificate authority's certificate in the cert chain for +server validity, or run insecure by not verifying the server certificate dial +your own connection. DialTLS will use the provided tls.Config when it +encounters an amqps:// scheme and will dial a plain connection when it +encounters an amqp:// scheme. 
+ +SSL/TLS in RabbitMQ is documented here: http://www.rabbitmq.com/ssl.html + +*/ +package amqp diff --git a/vendor/github.com/streadway/amqp/fuzz.go b/vendor/github.com/streadway/amqp/fuzz.go new file mode 100644 index 0000000..16e626c --- /dev/null +++ b/vendor/github.com/streadway/amqp/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package amqp + +import "bytes" + +func Fuzz(data []byte) int { + r := reader{bytes.NewReader(data)} + frame, err := r.ReadFrame() + if err != nil { + if frame != nil { + panic("frame is not nil") + } + return 0 + } + return 1 +} diff --git a/vendor/github.com/streadway/amqp/gen.sh b/vendor/github.com/streadway/amqp/gen.sh new file mode 100644 index 0000000..d46e19b --- /dev/null +++ b/vendor/github.com/streadway/amqp/gen.sh @@ -0,0 +1,2 @@ +#!/bin/sh +go run spec/gen.go < spec/amqp0-9-1.stripped.extended.xml | gofmt > spec091.go diff --git a/vendor/github.com/streadway/amqp/go.mod b/vendor/github.com/streadway/amqp/go.mod new file mode 100644 index 0000000..4eeab33 --- /dev/null +++ b/vendor/github.com/streadway/amqp/go.mod @@ -0,0 +1,3 @@ +module github.com/streadway/amqp + +go 1.10 diff --git a/vendor/github.com/streadway/amqp/pre-commit b/vendor/github.com/streadway/amqp/pre-commit new file mode 100644 index 0000000..3715530 --- /dev/null +++ b/vendor/github.com/streadway/amqp/pre-commit @@ -0,0 +1,67 @@ +#!/bin/sh + +LATEST_STABLE_SUPPORTED_GO_VERSION="1.11" + +main() { + if local_go_version_is_latest_stable + then + run_gofmt + run_golint + run_govet + fi + run_unit_tests +} + +local_go_version_is_latest_stable() { + go version | grep -q $LATEST_STABLE_SUPPORTED_GO_VERSION +} + +log_error() { + echo "$*" 1>&2 +} + +run_gofmt() { + GOFMT_FILES=$(gofmt -l .) + if [ -n "$GOFMT_FILES" ] + then + log_error "gofmt failed for the following files: +$GOFMT_FILES + +please run 'gofmt -w .' on your changes before committing." + exit 1 + fi +} + +run_golint() { + GOLINT_ERRORS=$(golint ./... 
| grep -v "Id should be") + if [ -n "$GOLINT_ERRORS" ] + then + log_error "golint failed for the following reasons: +$GOLINT_ERRORS + +please run 'golint ./...' on your changes before committing." + exit 1 + fi +} + +run_govet() { + GOVET_ERRORS=$(go tool vet ./*.go 2>&1) + if [ -n "$GOVET_ERRORS" ] + then + log_error "go vet failed for the following reasons: +$GOVET_ERRORS + +please run 'go tool vet ./*.go' on your changes before committing." + exit 1 + fi +} + +run_unit_tests() { + if [ -z "$NOTEST" ] + then + log_error 'Running short tests...' + env AMQP_URL= go test -short + fi +} + +main diff --git a/vendor/github.com/streadway/amqp/read.go b/vendor/github.com/streadway/amqp/read.go new file mode 100644 index 0000000..3aa0b33 --- /dev/null +++ b/vendor/github.com/streadway/amqp/read.go @@ -0,0 +1,456 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "time" +) + +/* +Reads a frame from an input stream and returns an interface that can be cast into +one of the following: + + methodFrame + PropertiesFrame + bodyFrame + heartbeatFrame + +2.3.5 frame Details + +All frames consist of a header (7 octets), a payload of arbitrary size, and a +'frame-end' octet that detects malformed frames: + + 0 1 3 7 size+7 size+8 + +------+---------+-------------+ +------------+ +-----------+ + | type | channel | size | | payload | | frame-end | + +------+---------+-------------+ +------------+ +-----------+ + octet short long size octets octet + +To read a frame, we: + 1. Read the header and check the frame type and channel. + 2. Depending on the frame type, we read the payload and process it. + 3. Read the frame end octet. 
+ +In realistic implementations where performance is a concern, we would use +“read-ahead buffering” or + +“gathering reads” to avoid doing three separate system calls to read a frame. +*/ +func (r *reader) ReadFrame() (frame frame, err error) { + var scratch [7]byte + + if _, err = io.ReadFull(r.r, scratch[:7]); err != nil { + return + } + + typ := uint8(scratch[0]) + channel := binary.BigEndian.Uint16(scratch[1:3]) + size := binary.BigEndian.Uint32(scratch[3:7]) + + switch typ { + case frameMethod: + if frame, err = r.parseMethodFrame(channel, size); err != nil { + return + } + + case frameHeader: + if frame, err = r.parseHeaderFrame(channel, size); err != nil { + return + } + + case frameBody: + if frame, err = r.parseBodyFrame(channel, size); err != nil { + return nil, err + } + + case frameHeartbeat: + if frame, err = r.parseHeartbeatFrame(channel, size); err != nil { + return + } + + default: + return nil, ErrFrame + } + + if _, err = io.ReadFull(r.r, scratch[:1]); err != nil { + return nil, err + } + + if scratch[0] != frameEnd { + return nil, ErrFrame + } + + return +} + +func readShortstr(r io.Reader) (v string, err error) { + var length uint8 + if err = binary.Read(r, binary.BigEndian, &length); err != nil { + return + } + + bytes := make([]byte, length) + if _, err = io.ReadFull(r, bytes); err != nil { + return + } + return string(bytes), nil +} + +func readLongstr(r io.Reader) (v string, err error) { + var length uint32 + if err = binary.Read(r, binary.BigEndian, &length); err != nil { + return + } + + // slices can't be longer than max int32 value + if length > (^uint32(0) >> 1) { + return + } + + bytes := make([]byte, length) + if _, err = io.ReadFull(r, bytes); err != nil { + return + } + return string(bytes), nil +} + +func readDecimal(r io.Reader) (v Decimal, err error) { + if err = binary.Read(r, binary.BigEndian, &v.Scale); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &v.Value); err != nil { + return + } + return +} + 
+func readFloat32(r io.Reader) (v float32, err error) { + if err = binary.Read(r, binary.BigEndian, &v); err != nil { + return + } + return +} + +func readFloat64(r io.Reader) (v float64, err error) { + if err = binary.Read(r, binary.BigEndian, &v); err != nil { + return + } + return +} + +func readTimestamp(r io.Reader) (v time.Time, err error) { + var sec int64 + if err = binary.Read(r, binary.BigEndian, &sec); err != nil { + return + } + return time.Unix(sec, 0), nil +} + +/* +'A': []interface{} +'D': Decimal +'F': Table +'I': int32 +'S': string +'T': time.Time +'V': nil +'b': byte +'d': float64 +'f': float32 +'l': int64 +'s': int16 +'t': bool +'x': []byte +*/ +func readField(r io.Reader) (v interface{}, err error) { + var typ byte + if err = binary.Read(r, binary.BigEndian, &typ); err != nil { + return + } + + switch typ { + case 't': + var value uint8 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return (value != 0), nil + + case 'b': + var value [1]byte + if _, err = io.ReadFull(r, value[0:1]); err != nil { + return + } + return value[0], nil + + case 's': + var value int16 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'I': + var value int32 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'l': + var value int64 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'f': + var value float32 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'd': + var value float64 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'D': + return readDecimal(r) + + case 'S': + return readLongstr(r) + + case 'A': + return readArray(r) + + case 'T': + return readTimestamp(r) + + case 'F': + return readTable(r) + + case 'x': + var len int32 + if err = 
binary.Read(r, binary.BigEndian, &len); err != nil { + return nil, err + } + + value := make([]byte, len) + if _, err = io.ReadFull(r, value); err != nil { + return nil, err + } + return value, err + + case 'V': + return nil, nil + } + + return nil, ErrSyntax +} + +/* + Field tables are long strings that contain packed name-value pairs. The + name-value pairs are encoded as short string defining the name, and octet + defining the values type and then the value itself. The valid field types for + tables are an extension of the native integer, bit, string, and timestamp + types, and are shown in the grammar. Multi-octet integer fields are always + held in network byte order. +*/ +func readTable(r io.Reader) (table Table, err error) { + var nested bytes.Buffer + var str string + + if str, err = readLongstr(r); err != nil { + return + } + + nested.Write([]byte(str)) + + table = make(Table) + + for nested.Len() > 0 { + var key string + var value interface{} + + if key, err = readShortstr(&nested); err != nil { + return + } + + if value, err = readField(&nested); err != nil { + return + } + + table[key] = value + } + + return +} + +func readArray(r io.Reader) ([]interface{}, error) { + var ( + size uint32 + err error + ) + + if err = binary.Read(r, binary.BigEndian, &size); err != nil { + return nil, err + } + + var ( + lim = &io.LimitedReader{R: r, N: int64(size)} + arr = []interface{}{} + field interface{} + ) + + for { + if field, err = readField(lim); err != nil { + if err == io.EOF { + break + } + return nil, err + } + arr = append(arr, field) + } + + return arr, nil +} + +// Checks if this bit mask matches the flags bitset +func hasProperty(mask uint16, prop int) bool { + return int(mask)&prop > 0 +} + +func (r *reader) parseHeaderFrame(channel uint16, size uint32) (frame frame, err error) { + hf := &headerFrame{ + ChannelId: channel, + } + + if err = binary.Read(r.r, binary.BigEndian, &hf.ClassId); err != nil { + return + } + + if err = binary.Read(r.r, 
binary.BigEndian, &hf.weight); err != nil { + return + } + + if err = binary.Read(r.r, binary.BigEndian, &hf.Size); err != nil { + return + } + + var flags uint16 + + if err = binary.Read(r.r, binary.BigEndian, &flags); err != nil { + return + } + + if hasProperty(flags, flagContentType) { + if hf.Properties.ContentType, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagContentEncoding) { + if hf.Properties.ContentEncoding, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagHeaders) { + if hf.Properties.Headers, err = readTable(r.r); err != nil { + return + } + } + if hasProperty(flags, flagDeliveryMode) { + if err = binary.Read(r.r, binary.BigEndian, &hf.Properties.DeliveryMode); err != nil { + return + } + } + if hasProperty(flags, flagPriority) { + if err = binary.Read(r.r, binary.BigEndian, &hf.Properties.Priority); err != nil { + return + } + } + if hasProperty(flags, flagCorrelationId) { + if hf.Properties.CorrelationId, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagReplyTo) { + if hf.Properties.ReplyTo, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagExpiration) { + if hf.Properties.Expiration, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagMessageId) { + if hf.Properties.MessageId, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagTimestamp) { + if hf.Properties.Timestamp, err = readTimestamp(r.r); err != nil { + return + } + } + if hasProperty(flags, flagType) { + if hf.Properties.Type, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagUserId) { + if hf.Properties.UserId, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagAppId) { + if hf.Properties.AppId, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagReserved1) { + if hf.Properties.reserved1, err = 
readShortstr(r.r); err != nil { + return + } + } + + return hf, nil +} + +func (r *reader) parseBodyFrame(channel uint16, size uint32) (frame frame, err error) { + bf := &bodyFrame{ + ChannelId: channel, + Body: make([]byte, size), + } + + if _, err = io.ReadFull(r.r, bf.Body); err != nil { + return nil, err + } + + return bf, nil +} + +var errHeartbeatPayload = errors.New("Heartbeats should not have a payload") + +func (r *reader) parseHeartbeatFrame(channel uint16, size uint32) (frame frame, err error) { + hf := &heartbeatFrame{ + ChannelId: channel, + } + + if size > 0 { + return nil, errHeartbeatPayload + } + + return hf, nil +} diff --git a/vendor/github.com/streadway/amqp/return.go b/vendor/github.com/streadway/amqp/return.go new file mode 100644 index 0000000..10dcedb --- /dev/null +++ b/vendor/github.com/streadway/amqp/return.go @@ -0,0 +1,64 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "time" +) + +// Return captures a flattened struct of fields returned by the server when a +// Publishing is unable to be delivered either due to the `mandatory` flag set +// and no route found, or `immediate` flag set and no free consumer. 
+type Return struct { + ReplyCode uint16 // reason + ReplyText string // description + Exchange string // basic.publish exchange + RoutingKey string // basic.publish routing key + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + Headers Table // Application or header exchange table + DeliveryMode uint8 // queue implementation use - non-persistent (1) or persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user id + AppId string // application use - creating application + + Body []byte +} + +func newReturn(msg basicReturn) *Return { + props, body := msg.getContent() + + return &Return{ + ReplyCode: msg.ReplyCode, + ReplyText: msg.ReplyText, + Exchange: msg.Exchange, + RoutingKey: msg.RoutingKey, + + Headers: props.Headers, + ContentType: props.ContentType, + ContentEncoding: props.ContentEncoding, + DeliveryMode: props.DeliveryMode, + Priority: props.Priority, + CorrelationId: props.CorrelationId, + ReplyTo: props.ReplyTo, + Expiration: props.Expiration, + MessageId: props.MessageId, + Timestamp: props.Timestamp, + Type: props.Type, + UserId: props.UserId, + AppId: props.AppId, + + Body: body, + } +} diff --git a/vendor/github.com/streadway/amqp/spec091.go b/vendor/github.com/streadway/amqp/spec091.go new file mode 100644 index 0000000..cd53ebe --- /dev/null +++ b/vendor/github.com/streadway/amqp/spec091.go @@ -0,0 +1,3306 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +/* GENERATED FILE - DO NOT EDIT */ +/* Rebuild from the spec/gen.go tool */ + +package amqp + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Error codes that can be sent from the server during a connection or +// channel exception or used by the client to indicate a class of error like +// ErrCredentials. The text of the error is likely more interesting than +// these constants. +const ( + frameMethod = 1 + frameHeader = 2 + frameBody = 3 + frameHeartbeat = 8 + frameMinSize = 4096 + frameEnd = 206 + replySuccess = 200 + ContentTooLarge = 311 + NoRoute = 312 + NoConsumers = 313 + ConnectionForced = 320 + InvalidPath = 402 + AccessRefused = 403 + NotFound = 404 + ResourceLocked = 405 + PreconditionFailed = 406 + FrameError = 501 + SyntaxError = 502 + CommandInvalid = 503 + ChannelError = 504 + UnexpectedFrame = 505 + ResourceError = 506 + NotAllowed = 530 + NotImplemented = 540 + InternalError = 541 +) + +func isSoftExceptionCode(code int) bool { + switch code { + case 311: + return true + case 312: + return true + case 313: + return true + case 403: + return true + case 404: + return true + case 405: + return true + case 406: + return true + + } + return false +} + +type connectionStart struct { + VersionMajor byte + VersionMinor byte + ServerProperties Table + Mechanisms string + Locales string +} + +func (msg *connectionStart) id() (uint16, uint16) { + return 10, 10 +} + +func (msg *connectionStart) wait() bool { + return true +} + +func (msg *connectionStart) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.VersionMajor); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.VersionMinor); err != nil { + return + } + + if err = writeTable(w, msg.ServerProperties); err != nil { + return + } + + if err = writeLongstr(w, 
msg.Mechanisms); err != nil { + return + } + if err = writeLongstr(w, msg.Locales); err != nil { + return + } + + return +} + +func (msg *connectionStart) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.VersionMajor); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.VersionMinor); err != nil { + return + } + + if msg.ServerProperties, err = readTable(r); err != nil { + return + } + + if msg.Mechanisms, err = readLongstr(r); err != nil { + return + } + if msg.Locales, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionStartOk struct { + ClientProperties Table + Mechanism string + Response string + Locale string +} + +func (msg *connectionStartOk) id() (uint16, uint16) { + return 10, 11 +} + +func (msg *connectionStartOk) wait() bool { + return true +} + +func (msg *connectionStartOk) write(w io.Writer) (err error) { + + if err = writeTable(w, msg.ClientProperties); err != nil { + return + } + + if err = writeShortstr(w, msg.Mechanism); err != nil { + return + } + + if err = writeLongstr(w, msg.Response); err != nil { + return + } + + if err = writeShortstr(w, msg.Locale); err != nil { + return + } + + return +} + +func (msg *connectionStartOk) read(r io.Reader) (err error) { + + if msg.ClientProperties, err = readTable(r); err != nil { + return + } + + if msg.Mechanism, err = readShortstr(r); err != nil { + return + } + + if msg.Response, err = readLongstr(r); err != nil { + return + } + + if msg.Locale, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionSecure struct { + Challenge string +} + +func (msg *connectionSecure) id() (uint16, uint16) { + return 10, 20 +} + +func (msg *connectionSecure) wait() bool { + return true +} + +func (msg *connectionSecure) write(w io.Writer) (err error) { + + if err = writeLongstr(w, msg.Challenge); err != nil { + return + } + + return +} + +func (msg *connectionSecure) read(r io.Reader) (err error) { + + if 
msg.Challenge, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionSecureOk struct { + Response string +} + +func (msg *connectionSecureOk) id() (uint16, uint16) { + return 10, 21 +} + +func (msg *connectionSecureOk) wait() bool { + return true +} + +func (msg *connectionSecureOk) write(w io.Writer) (err error) { + + if err = writeLongstr(w, msg.Response); err != nil { + return + } + + return +} + +func (msg *connectionSecureOk) read(r io.Reader) (err error) { + + if msg.Response, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionTune struct { + ChannelMax uint16 + FrameMax uint32 + Heartbeat uint16 +} + +func (msg *connectionTune) id() (uint16, uint16) { + return 10, 30 +} + +func (msg *connectionTune) wait() bool { + return true +} + +func (msg *connectionTune) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ChannelMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.FrameMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.Heartbeat); err != nil { + return + } + + return +} + +func (msg *connectionTune) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ChannelMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.FrameMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.Heartbeat); err != nil { + return + } + + return +} + +type connectionTuneOk struct { + ChannelMax uint16 + FrameMax uint32 + Heartbeat uint16 +} + +func (msg *connectionTuneOk) id() (uint16, uint16) { + return 10, 31 +} + +func (msg *connectionTuneOk) wait() bool { + return true +} + +func (msg *connectionTuneOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ChannelMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.FrameMax); err != nil { + return + } + + if err = binary.Write(w, 
binary.BigEndian, msg.Heartbeat); err != nil { + return + } + + return +} + +func (msg *connectionTuneOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ChannelMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.FrameMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.Heartbeat); err != nil { + return + } + + return +} + +type connectionOpen struct { + VirtualHost string + reserved1 string + reserved2 bool +} + +func (msg *connectionOpen) id() (uint16, uint16) { + return 10, 40 +} + +func (msg *connectionOpen) wait() bool { + return true +} + +func (msg *connectionOpen) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, msg.VirtualHost); err != nil { + return + } + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + if msg.reserved2 { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *connectionOpen) read(r io.Reader) (err error) { + var bits byte + + if msg.VirtualHost, err = readShortstr(r); err != nil { + return + } + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.reserved2 = (bits&(1<<0) > 0) + + return +} + +type connectionOpenOk struct { + reserved1 string +} + +func (msg *connectionOpenOk) id() (uint16, uint16) { + return 10, 41 +} + +func (msg *connectionOpenOk) wait() bool { + return true +} + +func (msg *connectionOpenOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *connectionOpenOk) read(r io.Reader) (err error) { + + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionClose struct { + ReplyCode uint16 + ReplyText string + ClassId uint16 + MethodId uint16 +} + +func (msg *connectionClose) id() 
(uint16, uint16) { + return 10, 50 +} + +func (msg *connectionClose) wait() bool { + return true +} + +func (msg *connectionClose) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, msg.ReplyText); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.ClassId); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.MethodId); err != nil { + return + } + + return +} + +func (msg *connectionClose) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ReplyCode); err != nil { + return + } + + if msg.ReplyText, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.ClassId); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.MethodId); err != nil { + return + } + + return +} + +type connectionCloseOk struct { +} + +func (msg *connectionCloseOk) id() (uint16, uint16) { + return 10, 51 +} + +func (msg *connectionCloseOk) wait() bool { + return true +} + +func (msg *connectionCloseOk) write(w io.Writer) (err error) { + + return +} + +func (msg *connectionCloseOk) read(r io.Reader) (err error) { + + return +} + +type connectionBlocked struct { + Reason string +} + +func (msg *connectionBlocked) id() (uint16, uint16) { + return 10, 60 +} + +func (msg *connectionBlocked) wait() bool { + return false +} + +func (msg *connectionBlocked) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.Reason); err != nil { + return + } + + return +} + +func (msg *connectionBlocked) read(r io.Reader) (err error) { + + if msg.Reason, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionUnblocked struct { +} + +func (msg *connectionUnblocked) id() (uint16, uint16) { + return 10, 61 +} + +func (msg *connectionUnblocked) wait() bool { + return false +} + +func (msg *connectionUnblocked) write(w io.Writer) (err 
error) { + + return +} + +func (msg *connectionUnblocked) read(r io.Reader) (err error) { + + return +} + +type channelOpen struct { + reserved1 string +} + +func (msg *channelOpen) id() (uint16, uint16) { + return 20, 10 +} + +func (msg *channelOpen) wait() bool { + return true +} + +func (msg *channelOpen) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *channelOpen) read(r io.Reader) (err error) { + + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type channelOpenOk struct { + reserved1 string +} + +func (msg *channelOpenOk) id() (uint16, uint16) { + return 20, 11 +} + +func (msg *channelOpenOk) wait() bool { + return true +} + +func (msg *channelOpenOk) write(w io.Writer) (err error) { + + if err = writeLongstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *channelOpenOk) read(r io.Reader) (err error) { + + if msg.reserved1, err = readLongstr(r); err != nil { + return + } + + return +} + +type channelFlow struct { + Active bool +} + +func (msg *channelFlow) id() (uint16, uint16) { + return 20, 20 +} + +func (msg *channelFlow) wait() bool { + return true +} + +func (msg *channelFlow) write(w io.Writer) (err error) { + var bits byte + + if msg.Active { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *channelFlow) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Active = (bits&(1<<0) > 0) + + return +} + +type channelFlowOk struct { + Active bool +} + +func (msg *channelFlowOk) id() (uint16, uint16) { + return 20, 21 +} + +func (msg *channelFlowOk) wait() bool { + return false +} + +func (msg *channelFlowOk) write(w io.Writer) (err error) { + var bits byte + + if msg.Active { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != 
nil { + return + } + + return +} + +func (msg *channelFlowOk) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Active = (bits&(1<<0) > 0) + + return +} + +type channelClose struct { + ReplyCode uint16 + ReplyText string + ClassId uint16 + MethodId uint16 +} + +func (msg *channelClose) id() (uint16, uint16) { + return 20, 40 +} + +func (msg *channelClose) wait() bool { + return true +} + +func (msg *channelClose) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, msg.ReplyText); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.ClassId); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.MethodId); err != nil { + return + } + + return +} + +func (msg *channelClose) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ReplyCode); err != nil { + return + } + + if msg.ReplyText, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.ClassId); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.MethodId); err != nil { + return + } + + return +} + +type channelCloseOk struct { +} + +func (msg *channelCloseOk) id() (uint16, uint16) { + return 20, 41 +} + +func (msg *channelCloseOk) wait() bool { + return true +} + +func (msg *channelCloseOk) write(w io.Writer) (err error) { + + return +} + +func (msg *channelCloseOk) read(r io.Reader) (err error) { + + return +} + +type exchangeDeclare struct { + reserved1 uint16 + Exchange string + Type string + Passive bool + Durable bool + AutoDelete bool + Internal bool + NoWait bool + Arguments Table +} + +func (msg *exchangeDeclare) id() (uint16, uint16) { + return 40, 10 +} + +func (msg *exchangeDeclare) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeDeclare) write(w io.Writer) (err 
error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.Type); err != nil { + return + } + + if msg.Passive { + bits |= 1 << 0 + } + + if msg.Durable { + bits |= 1 << 1 + } + + if msg.AutoDelete { + bits |= 1 << 2 + } + + if msg.Internal { + bits |= 1 << 3 + } + + if msg.NoWait { + bits |= 1 << 4 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *exchangeDeclare) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.Type, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Passive = (bits&(1<<0) > 0) + msg.Durable = (bits&(1<<1) > 0) + msg.AutoDelete = (bits&(1<<2) > 0) + msg.Internal = (bits&(1<<3) > 0) + msg.NoWait = (bits&(1<<4) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeDeclareOk struct { +} + +func (msg *exchangeDeclareOk) id() (uint16, uint16) { + return 40, 11 +} + +func (msg *exchangeDeclareOk) wait() bool { + return true +} + +func (msg *exchangeDeclareOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeDeclareOk) read(r io.Reader) (err error) { + + return +} + +type exchangeDelete struct { + reserved1 uint16 + Exchange string + IfUnused bool + NoWait bool +} + +func (msg *exchangeDelete) id() (uint16, uint16) { + return 40, 20 +} + +func (msg *exchangeDelete) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeDelete) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != 
nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + + if msg.IfUnused { + bits |= 1 << 0 + } + + if msg.NoWait { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *exchangeDelete) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.IfUnused = (bits&(1<<0) > 0) + msg.NoWait = (bits&(1<<1) > 0) + + return +} + +type exchangeDeleteOk struct { +} + +func (msg *exchangeDeleteOk) id() (uint16, uint16) { + return 40, 21 +} + +func (msg *exchangeDeleteOk) wait() bool { + return true +} + +func (msg *exchangeDeleteOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeDeleteOk) read(r io.Reader) (err error) { + + return +} + +type exchangeBind struct { + reserved1 uint16 + Destination string + Source string + RoutingKey string + NoWait bool + Arguments Table +} + +func (msg *exchangeBind) id() (uint16, uint16) { + return 40, 30 +} + +func (msg *exchangeBind) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeBind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Destination); err != nil { + return + } + if err = writeShortstr(w, msg.Source); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *exchangeBind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, 
&msg.reserved1); err != nil { + return + } + + if msg.Destination, err = readShortstr(r); err != nil { + return + } + if msg.Source, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeBindOk struct { +} + +func (msg *exchangeBindOk) id() (uint16, uint16) { + return 40, 31 +} + +func (msg *exchangeBindOk) wait() bool { + return true +} + +func (msg *exchangeBindOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeBindOk) read(r io.Reader) (err error) { + + return +} + +type exchangeUnbind struct { + reserved1 uint16 + Destination string + Source string + RoutingKey string + NoWait bool + Arguments Table +} + +func (msg *exchangeUnbind) id() (uint16, uint16) { + return 40, 40 +} + +func (msg *exchangeUnbind) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeUnbind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Destination); err != nil { + return + } + if err = writeShortstr(w, msg.Source); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *exchangeUnbind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Destination, err = readShortstr(r); err != nil { + return + } + if msg.Source, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err 
!= nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeUnbindOk struct { +} + +func (msg *exchangeUnbindOk) id() (uint16, uint16) { + return 40, 51 +} + +func (msg *exchangeUnbindOk) wait() bool { + return true +} + +func (msg *exchangeUnbindOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeUnbindOk) read(r io.Reader) (err error) { + + return +} + +type queueDeclare struct { + reserved1 uint16 + Queue string + Passive bool + Durable bool + Exclusive bool + AutoDelete bool + NoWait bool + Arguments Table +} + +func (msg *queueDeclare) id() (uint16, uint16) { + return 50, 10 +} + +func (msg *queueDeclare) wait() bool { + return true && !msg.NoWait +} + +func (msg *queueDeclare) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if msg.Passive { + bits |= 1 << 0 + } + + if msg.Durable { + bits |= 1 << 1 + } + + if msg.Exclusive { + bits |= 1 << 2 + } + + if msg.AutoDelete { + bits |= 1 << 3 + } + + if msg.NoWait { + bits |= 1 << 4 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *queueDeclare) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Passive = (bits&(1<<0) > 0) + msg.Durable = (bits&(1<<1) > 0) + msg.Exclusive = (bits&(1<<2) > 0) + msg.AutoDelete = (bits&(1<<3) > 0) + msg.NoWait = (bits&(1<<4) > 0) + + if msg.Arguments, err = readTable(r); 
err != nil { + return + } + + return +} + +type queueDeclareOk struct { + Queue string + MessageCount uint32 + ConsumerCount uint32 +} + +func (msg *queueDeclareOk) id() (uint16, uint16) { + return 50, 11 +} + +func (msg *queueDeclareOk) wait() bool { + return true +} + +func (msg *queueDeclareOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.MessageCount); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.ConsumerCount); err != nil { + return + } + + return +} + +func (msg *queueDeclareOk) read(r io.Reader) (err error) { + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.ConsumerCount); err != nil { + return + } + + return +} + +type queueBind struct { + reserved1 uint16 + Queue string + Exchange string + RoutingKey string + NoWait bool + Arguments Table +} + +func (msg *queueBind) id() (uint16, uint16) { + return 50, 20 +} + +func (msg *queueBind) wait() bool { + return true && !msg.NoWait +} + +func (msg *queueBind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *queueBind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } 
+ if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type queueBindOk struct { +} + +func (msg *queueBindOk) id() (uint16, uint16) { + return 50, 21 +} + +func (msg *queueBindOk) wait() bool { + return true +} + +func (msg *queueBindOk) write(w io.Writer) (err error) { + + return +} + +func (msg *queueBindOk) read(r io.Reader) (err error) { + + return +} + +type queueUnbind struct { + reserved1 uint16 + Queue string + Exchange string + RoutingKey string + Arguments Table +} + +func (msg *queueUnbind) id() (uint16, uint16) { + return 50, 50 +} + +func (msg *queueUnbind) wait() bool { + return true +} + +func (msg *queueUnbind) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *queueUnbind) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type queueUnbindOk struct { +} + +func (msg *queueUnbindOk) id() (uint16, uint16) { + return 50, 51 +} + +func (msg *queueUnbindOk) wait() bool { + return true +} + +func (msg *queueUnbindOk) write(w io.Writer) (err error) { + + 
return +} + +func (msg *queueUnbindOk) read(r io.Reader) (err error) { + + return +} + +type queuePurge struct { + reserved1 uint16 + Queue string + NoWait bool +} + +func (msg *queuePurge) id() (uint16, uint16) { + return 50, 30 +} + +func (msg *queuePurge) wait() bool { + return true && !msg.NoWait +} + +func (msg *queuePurge) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *queuePurge) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + return +} + +type queuePurgeOk struct { + MessageCount uint32 +} + +func (msg *queuePurgeOk) id() (uint16, uint16) { + return 50, 31 +} + +func (msg *queuePurgeOk) wait() bool { + return true +} + +func (msg *queuePurgeOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.MessageCount); err != nil { + return + } + + return +} + +func (msg *queuePurgeOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + + return +} + +type queueDelete struct { + reserved1 uint16 + Queue string + IfUnused bool + IfEmpty bool + NoWait bool +} + +func (msg *queueDelete) id() (uint16, uint16) { + return 50, 40 +} + +func (msg *queueDelete) wait() bool { + return true && !msg.NoWait +} + +func (msg *queueDelete) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, 
msg.Queue); err != nil { + return + } + + if msg.IfUnused { + bits |= 1 << 0 + } + + if msg.IfEmpty { + bits |= 1 << 1 + } + + if msg.NoWait { + bits |= 1 << 2 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *queueDelete) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.IfUnused = (bits&(1<<0) > 0) + msg.IfEmpty = (bits&(1<<1) > 0) + msg.NoWait = (bits&(1<<2) > 0) + + return +} + +type queueDeleteOk struct { + MessageCount uint32 +} + +func (msg *queueDeleteOk) id() (uint16, uint16) { + return 50, 41 +} + +func (msg *queueDeleteOk) wait() bool { + return true +} + +func (msg *queueDeleteOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.MessageCount); err != nil { + return + } + + return +} + +func (msg *queueDeleteOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + + return +} + +type basicQos struct { + PrefetchSize uint32 + PrefetchCount uint16 + Global bool +} + +func (msg *basicQos) id() (uint16, uint16) { + return 60, 10 +} + +func (msg *basicQos) wait() bool { + return true +} + +func (msg *basicQos) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.PrefetchSize); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.PrefetchCount); err != nil { + return + } + + if msg.Global { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicQos) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.PrefetchSize); err != nil { + return + } + + if err = 
binary.Read(r, binary.BigEndian, &msg.PrefetchCount); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Global = (bits&(1<<0) > 0) + + return +} + +type basicQosOk struct { +} + +func (msg *basicQosOk) id() (uint16, uint16) { + return 60, 11 +} + +func (msg *basicQosOk) wait() bool { + return true +} + +func (msg *basicQosOk) write(w io.Writer) (err error) { + + return +} + +func (msg *basicQosOk) read(r io.Reader) (err error) { + + return +} + +type basicConsume struct { + reserved1 uint16 + Queue string + ConsumerTag string + NoLocal bool + NoAck bool + Exclusive bool + NoWait bool + Arguments Table +} + +func (msg *basicConsume) id() (uint16, uint16) { + return 60, 20 +} + +func (msg *basicConsume) wait() bool { + return true && !msg.NoWait +} + +func (msg *basicConsume) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + if msg.NoLocal { + bits |= 1 << 0 + } + + if msg.NoAck { + bits |= 1 << 1 + } + + if msg.Exclusive { + bits |= 1 << 2 + } + + if msg.NoWait { + bits |= 1 << 3 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *basicConsume) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoLocal = (bits&(1<<0) > 0) + msg.NoAck = (bits&(1<<1) > 0) + msg.Exclusive = (bits&(1<<2) > 0) + msg.NoWait = (bits&(1<<3) > 0) + + if msg.Arguments, err 
= readTable(r); err != nil { + return + } + + return +} + +type basicConsumeOk struct { + ConsumerTag string +} + +func (msg *basicConsumeOk) id() (uint16, uint16) { + return 60, 21 +} + +func (msg *basicConsumeOk) wait() bool { + return true +} + +func (msg *basicConsumeOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + return +} + +func (msg *basicConsumeOk) read(r io.Reader) (err error) { + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicCancel struct { + ConsumerTag string + NoWait bool +} + +func (msg *basicCancel) id() (uint16, uint16) { + return 60, 30 +} + +func (msg *basicCancel) wait() bool { + return true && !msg.NoWait +} + +func (msg *basicCancel) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicCancel) read(r io.Reader) (err error) { + var bits byte + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + return +} + +type basicCancelOk struct { + ConsumerTag string +} + +func (msg *basicCancelOk) id() (uint16, uint16) { + return 60, 31 +} + +func (msg *basicCancelOk) wait() bool { + return true +} + +func (msg *basicCancelOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + return +} + +func (msg *basicCancelOk) read(r io.Reader) (err error) { + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicPublish struct { + reserved1 uint16 + Exchange string + RoutingKey string + Mandatory bool + Immediate bool + Properties properties + Body []byte +} + +func (msg *basicPublish) id() 
(uint16, uint16) { + return 60, 40 +} + +func (msg *basicPublish) wait() bool { + return false +} + +func (msg *basicPublish) getContent() (properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *basicPublish) setContent(props properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *basicPublish) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.Mandatory { + bits |= 1 << 0 + } + + if msg.Immediate { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicPublish) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Mandatory = (bits&(1<<0) > 0) + msg.Immediate = (bits&(1<<1) > 0) + + return +} + +type basicReturn struct { + ReplyCode uint16 + ReplyText string + Exchange string + RoutingKey string + Properties properties + Body []byte +} + +func (msg *basicReturn) id() (uint16, uint16) { + return 60, 50 +} + +func (msg *basicReturn) wait() bool { + return false +} + +func (msg *basicReturn) getContent() (properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *basicReturn) setContent(props properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *basicReturn) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, msg.ReplyText); err != nil { + return + } + if err = 
writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + return +} + +func (msg *basicReturn) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ReplyCode); err != nil { + return + } + + if msg.ReplyText, err = readShortstr(r); err != nil { + return + } + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicDeliver struct { + ConsumerTag string + DeliveryTag uint64 + Redelivered bool + Exchange string + RoutingKey string + Properties properties + Body []byte +} + +func (msg *basicDeliver) id() (uint16, uint16) { + return 60, 60 +} + +func (msg *basicDeliver) wait() bool { + return false +} + +func (msg *basicDeliver) getContent() (properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *basicDeliver) setContent(props properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *basicDeliver) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Redelivered { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + return +} + +func (msg *basicDeliver) read(r io.Reader) (err error) { + var bits byte + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Redelivered = (bits&(1<<0) > 0) + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if 
msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicGet struct { + reserved1 uint16 + Queue string + NoAck bool +} + +func (msg *basicGet) id() (uint16, uint16) { + return 60, 70 +} + +func (msg *basicGet) wait() bool { + return true +} + +func (msg *basicGet) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if msg.NoAck { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicGet) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoAck = (bits&(1<<0) > 0) + + return +} + +type basicGetOk struct { + DeliveryTag uint64 + Redelivered bool + Exchange string + RoutingKey string + MessageCount uint32 + Properties properties + Body []byte +} + +func (msg *basicGetOk) id() (uint16, uint16) { + return 60, 71 +} + +func (msg *basicGetOk) wait() bool { + return true +} + +func (msg *basicGetOk) getContent() (properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *basicGetOk) setContent(props properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *basicGetOk) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Redelivered { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, 
msg.MessageCount); err != nil { + return + } + + return +} + +func (msg *basicGetOk) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Redelivered = (bits&(1<<0) > 0) + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + + return +} + +type basicGetEmpty struct { + reserved1 string +} + +func (msg *basicGetEmpty) id() (uint16, uint16) { + return 60, 72 +} + +func (msg *basicGetEmpty) wait() bool { + return true +} + +func (msg *basicGetEmpty) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *basicGetEmpty) read(r io.Reader) (err error) { + + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicAck struct { + DeliveryTag uint64 + Multiple bool +} + +func (msg *basicAck) id() (uint16, uint16) { + return 60, 80 +} + +func (msg *basicAck) wait() bool { + return false +} + +func (msg *basicAck) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Multiple { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicAck) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Multiple = (bits&(1<<0) > 0) + + return +} + +type basicReject struct { + DeliveryTag uint64 + Requeue bool +} + +func (msg *basicReject) id() (uint16, uint16) { + return 60, 90 +} + +func (msg 
*basicReject) wait() bool { + return false +} + +func (msg *basicReject) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicReject) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecoverAsync struct { + Requeue bool +} + +func (msg *basicRecoverAsync) id() (uint16, uint16) { + return 60, 100 +} + +func (msg *basicRecoverAsync) wait() bool { + return false +} + +func (msg *basicRecoverAsync) write(w io.Writer) (err error) { + var bits byte + + if msg.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicRecoverAsync) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecover struct { + Requeue bool +} + +func (msg *basicRecover) id() (uint16, uint16) { + return 60, 110 +} + +func (msg *basicRecover) wait() bool { + return true +} + +func (msg *basicRecover) write(w io.Writer) (err error) { + var bits byte + + if msg.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicRecover) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecoverOk struct { +} + +func (msg *basicRecoverOk) id() (uint16, uint16) { + return 60, 111 +} + +func (msg *basicRecoverOk) 
wait() bool { + return true +} + +func (msg *basicRecoverOk) write(w io.Writer) (err error) { + + return +} + +func (msg *basicRecoverOk) read(r io.Reader) (err error) { + + return +} + +type basicNack struct { + DeliveryTag uint64 + Multiple bool + Requeue bool +} + +func (msg *basicNack) id() (uint16, uint16) { + return 60, 120 +} + +func (msg *basicNack) wait() bool { + return false +} + +func (msg *basicNack) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Multiple { + bits |= 1 << 0 + } + + if msg.Requeue { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicNack) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Multiple = (bits&(1<<0) > 0) + msg.Requeue = (bits&(1<<1) > 0) + + return +} + +type txSelect struct { +} + +func (msg *txSelect) id() (uint16, uint16) { + return 90, 10 +} + +func (msg *txSelect) wait() bool { + return true +} + +func (msg *txSelect) write(w io.Writer) (err error) { + + return +} + +func (msg *txSelect) read(r io.Reader) (err error) { + + return +} + +type txSelectOk struct { +} + +func (msg *txSelectOk) id() (uint16, uint16) { + return 90, 11 +} + +func (msg *txSelectOk) wait() bool { + return true +} + +func (msg *txSelectOk) write(w io.Writer) (err error) { + + return +} + +func (msg *txSelectOk) read(r io.Reader) (err error) { + + return +} + +type txCommit struct { +} + +func (msg *txCommit) id() (uint16, uint16) { + return 90, 20 +} + +func (msg *txCommit) wait() bool { + return true +} + +func (msg *txCommit) write(w io.Writer) (err error) { + + return +} + +func (msg *txCommit) read(r io.Reader) (err error) { + + return +} + +type txCommitOk struct { +} + +func (msg 
*txCommitOk) id() (uint16, uint16) { + return 90, 21 +} + +func (msg *txCommitOk) wait() bool { + return true +} + +func (msg *txCommitOk) write(w io.Writer) (err error) { + + return +} + +func (msg *txCommitOk) read(r io.Reader) (err error) { + + return +} + +type txRollback struct { +} + +func (msg *txRollback) id() (uint16, uint16) { + return 90, 30 +} + +func (msg *txRollback) wait() bool { + return true +} + +func (msg *txRollback) write(w io.Writer) (err error) { + + return +} + +func (msg *txRollback) read(r io.Reader) (err error) { + + return +} + +type txRollbackOk struct { +} + +func (msg *txRollbackOk) id() (uint16, uint16) { + return 90, 31 +} + +func (msg *txRollbackOk) wait() bool { + return true +} + +func (msg *txRollbackOk) write(w io.Writer) (err error) { + + return +} + +func (msg *txRollbackOk) read(r io.Reader) (err error) { + + return +} + +type confirmSelect struct { + Nowait bool +} + +func (msg *confirmSelect) id() (uint16, uint16) { + return 85, 10 +} + +func (msg *confirmSelect) wait() bool { + return true +} + +func (msg *confirmSelect) write(w io.Writer) (err error) { + var bits byte + + if msg.Nowait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *confirmSelect) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Nowait = (bits&(1<<0) > 0) + + return +} + +type confirmSelectOk struct { +} + +func (msg *confirmSelectOk) id() (uint16, uint16) { + return 85, 11 +} + +func (msg *confirmSelectOk) wait() bool { + return true +} + +func (msg *confirmSelectOk) write(w io.Writer) (err error) { + + return +} + +func (msg *confirmSelectOk) read(r io.Reader) (err error) { + + return +} + +func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err error) { + mf := &methodFrame{ + ChannelId: channel, + } + + if err = binary.Read(r.r, binary.BigEndian, &mf.ClassId); err 
!= nil { + return + } + + if err = binary.Read(r.r, binary.BigEndian, &mf.MethodId); err != nil { + return + } + + switch mf.ClassId { + + case 10: // connection + switch mf.MethodId { + + case 10: // connection start + //fmt.Println("NextMethod: class:10 method:10") + method := &connectionStart{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // connection start-ok + //fmt.Println("NextMethod: class:10 method:11") + method := &connectionStartOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 20: // connection secure + //fmt.Println("NextMethod: class:10 method:20") + method := &connectionSecure{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // connection secure-ok + //fmt.Println("NextMethod: class:10 method:21") + method := &connectionSecureOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // connection tune + //fmt.Println("NextMethod: class:10 method:30") + method := &connectionTune{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 31: // connection tune-ok + //fmt.Println("NextMethod: class:10 method:31") + method := &connectionTuneOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // connection open + //fmt.Println("NextMethod: class:10 method:40") + method := &connectionOpen{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 41: // connection open-ok + //fmt.Println("NextMethod: class:10 method:41") + method := &connectionOpenOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 50: // connection close + //fmt.Println("NextMethod: class:10 method:50") + method := &connectionClose{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 51: // connection close-ok + //fmt.Println("NextMethod: class:10 method:51") + 
method := &connectionCloseOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 60: // connection blocked + //fmt.Println("NextMethod: class:10 method:60") + method := &connectionBlocked{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 61: // connection unblocked + //fmt.Println("NextMethod: class:10 method:61") + method := &connectionUnblocked{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 20: // channel + switch mf.MethodId { + + case 10: // channel open + //fmt.Println("NextMethod: class:20 method:10") + method := &channelOpen{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // channel open-ok + //fmt.Println("NextMethod: class:20 method:11") + method := &channelOpenOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 20: // channel flow + //fmt.Println("NextMethod: class:20 method:20") + method := &channelFlow{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // channel flow-ok + //fmt.Println("NextMethod: class:20 method:21") + method := &channelFlowOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // channel close + //fmt.Println("NextMethod: class:20 method:40") + method := &channelClose{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 41: // channel close-ok + //fmt.Println("NextMethod: class:20 method:41") + method := &channelCloseOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 40: // exchange + switch mf.MethodId { + + case 10: // exchange declare + 
//fmt.Println("NextMethod: class:40 method:10") + method := &exchangeDeclare{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // exchange declare-ok + //fmt.Println("NextMethod: class:40 method:11") + method := &exchangeDeclareOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 20: // exchange delete + //fmt.Println("NextMethod: class:40 method:20") + method := &exchangeDelete{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // exchange delete-ok + //fmt.Println("NextMethod: class:40 method:21") + method := &exchangeDeleteOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // exchange bind + //fmt.Println("NextMethod: class:40 method:30") + method := &exchangeBind{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 31: // exchange bind-ok + //fmt.Println("NextMethod: class:40 method:31") + method := &exchangeBindOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // exchange unbind + //fmt.Println("NextMethod: class:40 method:40") + method := &exchangeUnbind{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 51: // exchange unbind-ok + //fmt.Println("NextMethod: class:40 method:51") + method := &exchangeUnbindOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 50: // queue + switch mf.MethodId { + + case 10: // queue declare + //fmt.Println("NextMethod: class:50 method:10") + method := &queueDeclare{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // queue declare-ok + //fmt.Println("NextMethod: class:50 method:11") + method := &queueDeclareOk{} + if err = method.read(r.r); err != nil { + return + 
} + mf.Method = method + + case 20: // queue bind + //fmt.Println("NextMethod: class:50 method:20") + method := &queueBind{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // queue bind-ok + //fmt.Println("NextMethod: class:50 method:21") + method := &queueBindOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 50: // queue unbind + //fmt.Println("NextMethod: class:50 method:50") + method := &queueUnbind{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 51: // queue unbind-ok + //fmt.Println("NextMethod: class:50 method:51") + method := &queueUnbindOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // queue purge + //fmt.Println("NextMethod: class:50 method:30") + method := &queuePurge{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 31: // queue purge-ok + //fmt.Println("NextMethod: class:50 method:31") + method := &queuePurgeOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // queue delete + //fmt.Println("NextMethod: class:50 method:40") + method := &queueDelete{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 41: // queue delete-ok + //fmt.Println("NextMethod: class:50 method:41") + method := &queueDeleteOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 60: // basic + switch mf.MethodId { + + case 10: // basic qos + //fmt.Println("NextMethod: class:60 method:10") + method := &basicQos{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // basic qos-ok + //fmt.Println("NextMethod: class:60 method:11") + method := &basicQosOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method 
= method + + case 20: // basic consume + //fmt.Println("NextMethod: class:60 method:20") + method := &basicConsume{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // basic consume-ok + //fmt.Println("NextMethod: class:60 method:21") + method := &basicConsumeOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // basic cancel + //fmt.Println("NextMethod: class:60 method:30") + method := &basicCancel{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 31: // basic cancel-ok + //fmt.Println("NextMethod: class:60 method:31") + method := &basicCancelOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // basic publish + //fmt.Println("NextMethod: class:60 method:40") + method := &basicPublish{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 50: // basic return + //fmt.Println("NextMethod: class:60 method:50") + method := &basicReturn{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 60: // basic deliver + //fmt.Println("NextMethod: class:60 method:60") + method := &basicDeliver{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 70: // basic get + //fmt.Println("NextMethod: class:60 method:70") + method := &basicGet{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 71: // basic get-ok + //fmt.Println("NextMethod: class:60 method:71") + method := &basicGetOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 72: // basic get-empty + //fmt.Println("NextMethod: class:60 method:72") + method := &basicGetEmpty{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 80: // basic ack + //fmt.Println("NextMethod: class:60 method:80") + method := &basicAck{} + if err = method.read(r.r); err != nil { + return + } 
+ mf.Method = method + + case 90: // basic reject + //fmt.Println("NextMethod: class:60 method:90") + method := &basicReject{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 100: // basic recover-async + //fmt.Println("NextMethod: class:60 method:100") + method := &basicRecoverAsync{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 110: // basic recover + //fmt.Println("NextMethod: class:60 method:110") + method := &basicRecover{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 111: // basic recover-ok + //fmt.Println("NextMethod: class:60 method:111") + method := &basicRecoverOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 120: // basic nack + //fmt.Println("NextMethod: class:60 method:120") + method := &basicNack{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 90: // tx + switch mf.MethodId { + + case 10: // tx select + //fmt.Println("NextMethod: class:90 method:10") + method := &txSelect{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // tx select-ok + //fmt.Println("NextMethod: class:90 method:11") + method := &txSelectOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 20: // tx commit + //fmt.Println("NextMethod: class:90 method:20") + method := &txCommit{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // tx commit-ok + //fmt.Println("NextMethod: class:90 method:21") + method := &txCommitOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // tx rollback + //fmt.Println("NextMethod: class:90 method:30") + method := &txRollback{} + if err = method.read(r.r); err != nil { + return + } + 
mf.Method = method + + case 31: // tx rollback-ok + //fmt.Println("NextMethod: class:90 method:31") + method := &txRollbackOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 85: // confirm + switch mf.MethodId { + + case 10: // confirm select + //fmt.Println("NextMethod: class:85 method:10") + method := &confirmSelect{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // confirm select-ok + //fmt.Println("NextMethod: class:85 method:11") + method := &confirmSelectOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + default: + return nil, fmt.Errorf("Bad method frame, unknown class %d", mf.ClassId) + } + + return mf, nil +} diff --git a/vendor/github.com/streadway/amqp/types.go b/vendor/github.com/streadway/amqp/types.go new file mode 100644 index 0000000..83bd92f --- /dev/null +++ b/vendor/github.com/streadway/amqp/types.go @@ -0,0 +1,428 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "fmt" + "io" + "time" +) + +// Constants for standard AMQP 0-9-1 exchange types. +const ( + ExchangeDirect = "direct" + ExchangeFanout = "fanout" + ExchangeTopic = "topic" + ExchangeHeaders = "headers" +) + +var ( + // ErrClosed is returned when the channel or connection is not open + ErrClosed = &Error{Code: ChannelError, Reason: "channel/connection is not open"} + + // ErrChannelMax is returned when Connection.Channel has been called enough + // times that all channel IDs have been exhausted in the client or the + // server. 
+ ErrChannelMax = &Error{Code: ChannelError, Reason: "channel id space exhausted"} + + // ErrSASL is returned from Dial when the authentication mechanism could not + // be negoated. + ErrSASL = &Error{Code: AccessRefused, Reason: "SASL could not negotiate a shared mechanism"} + + // ErrCredentials is returned when the authenticated client is not authorized + // to any vhost. + ErrCredentials = &Error{Code: AccessRefused, Reason: "username or password not allowed"} + + // ErrVhost is returned when the authenticated user is not permitted to + // access the requested Vhost. + ErrVhost = &Error{Code: AccessRefused, Reason: "no access to this vhost"} + + // ErrSyntax is hard protocol error, indicating an unsupported protocol, + // implementation or encoding. + ErrSyntax = &Error{Code: SyntaxError, Reason: "invalid field or value inside of a frame"} + + // ErrFrame is returned when the protocol frame cannot be read from the + // server, indicating an unsupported protocol or unsupported frame type. + ErrFrame = &Error{Code: FrameError, Reason: "frame could not be parsed"} + + // ErrCommandInvalid is returned when the server sends an unexpected response + // to this requested message type. This indicates a bug in this client. + ErrCommandInvalid = &Error{Code: CommandInvalid, Reason: "unexpected command received"} + + // ErrUnexpectedFrame is returned when something other than a method or + // heartbeat frame is delivered to the Connection, indicating a bug in the + // client. + ErrUnexpectedFrame = &Error{Code: UnexpectedFrame, Reason: "unexpected frame received"} + + // ErrFieldType is returned when writing a message containing a Go type unsupported by AMQP. + ErrFieldType = &Error{Code: SyntaxError, Reason: "unsupported table field type"} +) + +// Error captures the code and reason a channel or connection has been closed +// by the server. 
+type Error struct { + Code int // constant code from the specification + Reason string // description of the error + Server bool // true when initiated from the server, false when from this library + Recover bool // true when this error can be recovered by retrying later or with different parameters +} + +func newError(code uint16, text string) *Error { + return &Error{ + Code: int(code), + Reason: text, + Recover: isSoftExceptionCode(int(code)), + Server: true, + } +} + +func (e Error) Error() string { + return fmt.Sprintf("Exception (%d) Reason: %q", e.Code, e.Reason) +} + +// Used by header frames to capture routing and header information +type properties struct { + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + Headers Table // Application or header exchange table + DeliveryMode uint8 // queue implementation use - Transient (1) or Persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user id + AppId string // application use - creating application + reserved1 string // was cluster-id - process for buffer consumption +} + +// DeliveryMode. Transient means higher throughput but messages will not be +// restored on broker restart. The delivery mode of publishings is unrelated +// to the durability of the queues they reside on. Transient messages will +// not be restored to durable queues, persistent messages will be restored to +// durable queues and lost on non-durable queues during server restart. +// +// This remains typed as uint8 to match Publishing.DeliveryMode. 
Other +// delivery modes specific to custom queue implementations are not enumerated +// here. +const ( + Transient uint8 = 1 + Persistent uint8 = 2 +) + +// The property flags are an array of bits that indicate the presence or +// absence of each property value in sequence. The bits are ordered from most +// high to low - bit 15 indicates the first property. +const ( + flagContentType = 0x8000 + flagContentEncoding = 0x4000 + flagHeaders = 0x2000 + flagDeliveryMode = 0x1000 + flagPriority = 0x0800 + flagCorrelationId = 0x0400 + flagReplyTo = 0x0200 + flagExpiration = 0x0100 + flagMessageId = 0x0080 + flagTimestamp = 0x0040 + flagType = 0x0020 + flagUserId = 0x0010 + flagAppId = 0x0008 + flagReserved1 = 0x0004 +) + +// Queue captures the current server state of the queue on the server returned +// from Channel.QueueDeclare or Channel.QueueInspect. +type Queue struct { + Name string // server confirmed or generated name + Messages int // count of messages not awaiting acknowledgment + Consumers int // number of consumers receiving deliveries +} + +// Publishing captures the client message sent to the server. The fields +// outside of the Headers table included in this struct mirror the underlying +// fields in the content frame. They use native types for convenience and +// efficiency. +type Publishing struct { + // Application or exchange specific fields, + // the headers exchange will inspect this field. 
+ Headers Table + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + DeliveryMode uint8 // Transient (0 or 1) or Persistent (2) + Priority uint8 // 0 to 9 + CorrelationId string // correlation identifier + ReplyTo string // address to to reply to (ex: RPC) + Expiration string // message expiration spec + MessageId string // message identifier + Timestamp time.Time // message timestamp + Type string // message type name + UserId string // creating user id - ex: "guest" + AppId string // creating application id + + // The application specific payload of the message + Body []byte +} + +// Blocking notifies the server's TCP flow control of the Connection. When a +// server hits a memory or disk alarm it will block all connections until the +// resources are reclaimed. Use NotifyBlock on the Connection to receive these +// events. +type Blocking struct { + Active bool // TCP pushback active/inactive on server + Reason string // Server reason for activation +} + +// Confirmation notifies the acknowledgment or negative acknowledgement of a +// publishing identified by its delivery tag. Use NotifyPublish on the Channel +// to consume these events. +type Confirmation struct { + DeliveryTag uint64 // A 1 based counter of publishings from when the channel was put in Confirm mode + Ack bool // True when the server successfully received the publishing +} + +// Decimal matches the AMQP decimal type. Scale is the number of decimal +// digits Scale == 2, Value == 12345, Decimal == 123.45 +type Decimal struct { + Scale uint8 + Value int32 +} + +// Table stores user supplied fields of the following types: +// +// bool +// byte +// float32 +// float64 +// int +// int16 +// int32 +// int64 +// nil +// string +// time.Time +// amqp.Decimal +// amqp.Table +// []byte +// []interface{} - containing above types +// +// Functions taking a table will immediately fail when the table contains a +// value of an unsupported type. 
+// +// The caller must be specific in which precision of integer it wishes to +// encode. +// +// Use a type assertion when reading values from a table for type conversion. +// +// RabbitMQ expects int32 for integer values. +// +type Table map[string]interface{} + +func validateField(f interface{}) error { + switch fv := f.(type) { + case nil, bool, byte, int, int16, int32, int64, float32, float64, string, []byte, Decimal, time.Time: + return nil + + case []interface{}: + for _, v := range fv { + if err := validateField(v); err != nil { + return fmt.Errorf("in array %s", err) + } + } + return nil + + case Table: + for k, v := range fv { + if err := validateField(v); err != nil { + return fmt.Errorf("table field %q %s", k, err) + } + } + return nil + } + + return fmt.Errorf("value %T not supported", f) +} + +// Validate returns and error if any Go types in the table are incompatible with AMQP types. +func (t Table) Validate() error { + return validateField(t) +} + +// Heap interface for maintaining delivery tags +type tagSet []uint64 + +func (set tagSet) Len() int { return len(set) } +func (set tagSet) Less(i, j int) bool { return (set)[i] < (set)[j] } +func (set tagSet) Swap(i, j int) { (set)[i], (set)[j] = (set)[j], (set)[i] } +func (set *tagSet) Push(tag interface{}) { *set = append(*set, tag.(uint64)) } +func (set *tagSet) Pop() interface{} { + val := (*set)[len(*set)-1] + *set = (*set)[:len(*set)-1] + return val +} + +type message interface { + id() (uint16, uint16) + wait() bool + read(io.Reader) error + write(io.Writer) error +} + +type messageWithContent interface { + message + getContent() (properties, []byte) + setContent(properties, []byte) +} + +/* +The base interface implemented as: + +2.3.5 frame Details + +All frames consist of a header (7 octets), a payload of arbitrary size, and a 'frame-end' octet that detects +malformed frames: + + 0 1 3 7 size+7 size+8 + +------+---------+-------------+ +------------+ +-----------+ + | type | channel | size | | 
payload | | frame-end | + +------+---------+-------------+ +------------+ +-----------+ + octet short long size octets octet + +To read a frame, we: + + 1. Read the header and check the frame type and channel. + 2. Depending on the frame type, we read the payload and process it. + 3. Read the frame end octet. + +In realistic implementations where performance is a concern, we would use +“read-ahead buffering” or “gathering reads” to avoid doing three separate +system calls to read a frame. + +*/ +type frame interface { + write(io.Writer) error + channel() uint16 +} + +type reader struct { + r io.Reader +} + +type writer struct { + w io.Writer +} + +// Implements the frame interface for Connection RPC +type protocolHeader struct{} + +func (protocolHeader) write(w io.Writer) error { + _, err := w.Write([]byte{'A', 'M', 'Q', 'P', 0, 0, 9, 1}) + return err +} + +func (protocolHeader) channel() uint16 { + panic("only valid as initial handshake") +} + +/* +Method frames carry the high-level protocol commands (which we call "methods"). +One method frame carries one command. The method frame payload has this format: + + 0 2 4 + +----------+-----------+-------------- - - + | class-id | method-id | arguments... + +----------+-----------+-------------- - - + short short ... + +To process a method frame, we: + 1. Read the method frame payload. + 2. Unpack it into a structure. A given method always has the same structure, + so we can unpack the method rapidly. 3. Check that the method is allowed in + the current context. + 4. Check that the method arguments are valid. + 5. Execute the method. + +Method frame bodies are constructed as a list of AMQP data fields (bits, +integers, strings and string tables). The marshalling code is trivially +generated directly from the protocol specifications, and can be very rapid. 
+*/ +type methodFrame struct { + ChannelId uint16 + ClassId uint16 + MethodId uint16 + Method message +} + +func (f *methodFrame) channel() uint16 { return f.ChannelId } + +/* +Heartbeating is a technique designed to undo one of TCP/IP's features, namely +its ability to recover from a broken physical connection by closing only after +a quite long time-out. In some scenarios we need to know very rapidly if a +peer is disconnected or not responding for other reasons (e.g. it is looping). +Since heartbeating can be done at a low level, we implement this as a special +type of frame that peers exchange at the transport level, rather than as a +class method. +*/ +type heartbeatFrame struct { + ChannelId uint16 +} + +func (f *heartbeatFrame) channel() uint16 { return f.ChannelId } + +/* +Certain methods (such as Basic.Publish, Basic.Deliver, etc.) are formally +defined as carrying content. When a peer sends such a method frame, it always +follows it with a content header and zero or more content body frames. + +A content header frame has this format: + + 0 2 4 12 14 + +----------+--------+-----------+----------------+------------- - - + | class-id | weight | body size | property flags | property list... + +----------+--------+-----------+----------------+------------- - - + short short long long short remainder... + +We place content body in distinct frames (rather than including it in the +method) so that AMQP may support "zero copy" techniques in which content is +never marshalled or encoded. We place the content properties in their own +frame so that recipients can selectively discard contents they do not want to +process +*/ +type headerFrame struct { + ChannelId uint16 + ClassId uint16 + weight uint16 + Size uint64 + Properties properties +} + +func (f *headerFrame) channel() uint16 { return f.ChannelId } + +/* +Content is the application data we carry from client-to-client via the AMQP +server. 
Content is, roughly speaking, a set of properties plus a binary data +part. The set of allowed properties are defined by the Basic class, and these +form the "content header frame". The data can be any size, and MAY be broken +into several (or many) chunks, each forming a "content body frame". + +Looking at the frames for a specific channel, as they pass on the wire, we +might see something like this: + + [method] + [method] [header] [body] [body] + [method] + ... +*/ +type bodyFrame struct { + ChannelId uint16 + Body []byte +} + +func (f *bodyFrame) channel() uint16 { return f.ChannelId } diff --git a/vendor/github.com/streadway/amqp/uri.go b/vendor/github.com/streadway/amqp/uri.go new file mode 100644 index 0000000..e584715 --- /dev/null +++ b/vendor/github.com/streadway/amqp/uri.go @@ -0,0 +1,176 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "errors" + "net" + "net/url" + "strconv" + "strings" +) + +var errURIScheme = errors.New("AMQP scheme must be either 'amqp://' or 'amqps://'") +var errURIWhitespace = errors.New("URI must not contain whitespace") + +var schemePorts = map[string]int{ + "amqp": 5672, + "amqps": 5671, +} + +var defaultURI = URI{ + Scheme: "amqp", + Host: "localhost", + Port: 5672, + Username: "guest", + Password: "guest", + Vhost: "/", +} + +// URI represents a parsed AMQP URI string. +type URI struct { + Scheme string + Host string + Port int + Username string + Password string + Vhost string +} + +// ParseURI attempts to parse the given AMQP URI according to the spec. +// See http://www.rabbitmq.com/uri-spec.html. 
+// +// Default values for the fields are: +// +// Scheme: amqp +// Host: localhost +// Port: 5672 +// Username: guest +// Password: guest +// Vhost: / +// +func ParseURI(uri string) (URI, error) { + builder := defaultURI + + if strings.Contains(uri, " ") == true { + return builder, errURIWhitespace + } + + u, err := url.Parse(uri) + if err != nil { + return builder, err + } + + defaultPort, okScheme := schemePorts[u.Scheme] + + if okScheme { + builder.Scheme = u.Scheme + } else { + return builder, errURIScheme + } + + host := u.Hostname() + port := u.Port() + + if host != "" { + builder.Host = host + } + + if port != "" { + port32, err := strconv.ParseInt(port, 10, 32) + if err != nil { + return builder, err + } + builder.Port = int(port32) + } else { + builder.Port = defaultPort + } + + if u.User != nil { + builder.Username = u.User.Username() + if password, ok := u.User.Password(); ok { + builder.Password = password + } + } + + if u.Path != "" { + if strings.HasPrefix(u.Path, "/") { + if u.Host == "" && strings.HasPrefix(u.Path, "///") { + // net/url doesn't handle local context authorities and leaves that up + // to the scheme handler. In our case, we translate amqp:/// into the + // default host and whatever the vhost should be + if len(u.Path) > 3 { + builder.Vhost = u.Path[3:] + } + } else if len(u.Path) > 1 { + builder.Vhost = u.Path[1:] + } + } else { + builder.Vhost = u.Path + } + } + + return builder, nil +} + +// PlainAuth returns a PlainAuth structure based on the parsed URI's +// Username and Password fields. +func (uri URI) PlainAuth() *PlainAuth { + return &PlainAuth{ + Username: uri.Username, + Password: uri.Password, + } +} + +// AMQPlainAuth returns a PlainAuth structure based on the parsed URI's +// Username and Password fields. 
+func (uri URI) AMQPlainAuth() *AMQPlainAuth { + return &AMQPlainAuth{ + Username: uri.Username, + Password: uri.Password, + } +} + +func (uri URI) String() string { + authority, err := url.Parse("") + if err != nil { + return err.Error() + } + + authority.Scheme = uri.Scheme + + if uri.Username != defaultURI.Username || uri.Password != defaultURI.Password { + authority.User = url.User(uri.Username) + + if uri.Password != defaultURI.Password { + authority.User = url.UserPassword(uri.Username, uri.Password) + } + } + + authority.Host = net.JoinHostPort(uri.Host, strconv.Itoa(uri.Port)) + + if defaultPort, found := schemePorts[uri.Scheme]; !found || defaultPort != uri.Port { + authority.Host = net.JoinHostPort(uri.Host, strconv.Itoa(uri.Port)) + } else { + // JoinHostPort() automatically add brackets to the host if it's + // an IPv6 address. + // + // If not port is specified, JoinHostPort() return an IP address in the + // form of "[::1]:", so we use TrimSuffix() to remove the extra ":". + authority.Host = strings.TrimSuffix(net.JoinHostPort(uri.Host, ""), ":") + } + + if uri.Vhost != defaultURI.Vhost { + // Make sure net/url does not double escape, e.g. + // "%2F" does not become "%252F". + authority.Path = uri.Vhost + authority.RawPath = url.QueryEscape(uri.Vhost) + } else { + authority.Path = "/" + } + + return authority.String() +} diff --git a/vendor/github.com/streadway/amqp/write.go b/vendor/github.com/streadway/amqp/write.go new file mode 100644 index 0000000..94a46d1 --- /dev/null +++ b/vendor/github.com/streadway/amqp/write.go @@ -0,0 +1,416 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "io" + "math" + "time" +) + +func (w *writer) WriteFrame(frame frame) (err error) { + if err = frame.write(w.w); err != nil { + return + } + + if buf, ok := w.w.(*bufio.Writer); ok { + err = buf.Flush() + } + + return +} + +func (f *methodFrame) write(w io.Writer) (err error) { + var payload bytes.Buffer + + if f.Method == nil { + return errors.New("malformed frame: missing method") + } + + class, method := f.Method.id() + + if err = binary.Write(&payload, binary.BigEndian, class); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, method); err != nil { + return + } + + if err = f.Method.write(&payload); err != nil { + return + } + + return writeFrame(w, frameMethod, f.ChannelId, payload.Bytes()) +} + +// Heartbeat +// +// Payload is empty +func (f *heartbeatFrame) write(w io.Writer) (err error) { + return writeFrame(w, frameHeartbeat, f.ChannelId, []byte{}) +} + +// CONTENT HEADER +// 0 2 4 12 14 +// +----------+--------+-----------+----------------+------------- - - +// | class-id | weight | body size | property flags | property list... +// +----------+--------+-----------+----------------+------------- - - +// short short long long short remainder... +// +func (f *headerFrame) write(w io.Writer) (err error) { + var payload bytes.Buffer + var zeroTime time.Time + + if err = binary.Write(&payload, binary.BigEndian, f.ClassId); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, f.weight); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, f.Size); err != nil { + return + } + + // First pass will build the mask to be serialized, second pass will serialize + // each of the fields that appear in the mask. 
+ + var mask uint16 + + if len(f.Properties.ContentType) > 0 { + mask = mask | flagContentType + } + if len(f.Properties.ContentEncoding) > 0 { + mask = mask | flagContentEncoding + } + if f.Properties.Headers != nil && len(f.Properties.Headers) > 0 { + mask = mask | flagHeaders + } + if f.Properties.DeliveryMode > 0 { + mask = mask | flagDeliveryMode + } + if f.Properties.Priority > 0 { + mask = mask | flagPriority + } + if len(f.Properties.CorrelationId) > 0 { + mask = mask | flagCorrelationId + } + if len(f.Properties.ReplyTo) > 0 { + mask = mask | flagReplyTo + } + if len(f.Properties.Expiration) > 0 { + mask = mask | flagExpiration + } + if len(f.Properties.MessageId) > 0 { + mask = mask | flagMessageId + } + if f.Properties.Timestamp != zeroTime { + mask = mask | flagTimestamp + } + if len(f.Properties.Type) > 0 { + mask = mask | flagType + } + if len(f.Properties.UserId) > 0 { + mask = mask | flagUserId + } + if len(f.Properties.AppId) > 0 { + mask = mask | flagAppId + } + + if err = binary.Write(&payload, binary.BigEndian, mask); err != nil { + return + } + + if hasProperty(mask, flagContentType) { + if err = writeShortstr(&payload, f.Properties.ContentType); err != nil { + return + } + } + if hasProperty(mask, flagContentEncoding) { + if err = writeShortstr(&payload, f.Properties.ContentEncoding); err != nil { + return + } + } + if hasProperty(mask, flagHeaders) { + if err = writeTable(&payload, f.Properties.Headers); err != nil { + return + } + } + if hasProperty(mask, flagDeliveryMode) { + if err = binary.Write(&payload, binary.BigEndian, f.Properties.DeliveryMode); err != nil { + return + } + } + if hasProperty(mask, flagPriority) { + if err = binary.Write(&payload, binary.BigEndian, f.Properties.Priority); err != nil { + return + } + } + if hasProperty(mask, flagCorrelationId) { + if err = writeShortstr(&payload, f.Properties.CorrelationId); err != nil { + return + } + } + if hasProperty(mask, flagReplyTo) { + if err = writeShortstr(&payload, 
f.Properties.ReplyTo); err != nil { + return + } + } + if hasProperty(mask, flagExpiration) { + if err = writeShortstr(&payload, f.Properties.Expiration); err != nil { + return + } + } + if hasProperty(mask, flagMessageId) { + if err = writeShortstr(&payload, f.Properties.MessageId); err != nil { + return + } + } + if hasProperty(mask, flagTimestamp) { + if err = binary.Write(&payload, binary.BigEndian, uint64(f.Properties.Timestamp.Unix())); err != nil { + return + } + } + if hasProperty(mask, flagType) { + if err = writeShortstr(&payload, f.Properties.Type); err != nil { + return + } + } + if hasProperty(mask, flagUserId) { + if err = writeShortstr(&payload, f.Properties.UserId); err != nil { + return + } + } + if hasProperty(mask, flagAppId) { + if err = writeShortstr(&payload, f.Properties.AppId); err != nil { + return + } + } + + return writeFrame(w, frameHeader, f.ChannelId, payload.Bytes()) +} + +// Body +// +// Payload is one byterange from the full body who's size is declared in the +// Header frame +func (f *bodyFrame) write(w io.Writer) (err error) { + return writeFrame(w, frameBody, f.ChannelId, f.Body) +} + +func writeFrame(w io.Writer, typ uint8, channel uint16, payload []byte) (err error) { + end := []byte{frameEnd} + size := uint(len(payload)) + + _, err = w.Write([]byte{ + byte(typ), + byte((channel & 0xff00) >> 8), + byte((channel & 0x00ff) >> 0), + byte((size & 0xff000000) >> 24), + byte((size & 0x00ff0000) >> 16), + byte((size & 0x0000ff00) >> 8), + byte((size & 0x000000ff) >> 0), + }) + + if err != nil { + return + } + + if _, err = w.Write(payload); err != nil { + return + } + + if _, err = w.Write(end); err != nil { + return + } + + return +} + +func writeShortstr(w io.Writer, s string) (err error) { + b := []byte(s) + + var length = uint8(len(b)) + + if err = binary.Write(w, binary.BigEndian, length); err != nil { + return + } + + if _, err = w.Write(b[:length]); err != nil { + return + } + + return +} + +func writeLongstr(w io.Writer, s 
string) (err error) { + b := []byte(s) + + var length = uint32(len(b)) + + if err = binary.Write(w, binary.BigEndian, length); err != nil { + return + } + + if _, err = w.Write(b[:length]); err != nil { + return + } + + return +} + +/* +'A': []interface{} +'D': Decimal +'F': Table +'I': int32 +'S': string +'T': time.Time +'V': nil +'b': byte +'d': float64 +'f': float32 +'l': int64 +'s': int16 +'t': bool +'x': []byte +*/ +func writeField(w io.Writer, value interface{}) (err error) { + var buf [9]byte + var enc []byte + + switch v := value.(type) { + case bool: + buf[0] = 't' + if v { + buf[1] = byte(1) + } else { + buf[1] = byte(0) + } + enc = buf[:2] + + case byte: + buf[0] = 'b' + buf[1] = byte(v) + enc = buf[:2] + + case int16: + buf[0] = 's' + binary.BigEndian.PutUint16(buf[1:3], uint16(v)) + enc = buf[:3] + + case int: + buf[0] = 'I' + binary.BigEndian.PutUint32(buf[1:5], uint32(v)) + enc = buf[:5] + + case int32: + buf[0] = 'I' + binary.BigEndian.PutUint32(buf[1:5], uint32(v)) + enc = buf[:5] + + case int64: + buf[0] = 'l' + binary.BigEndian.PutUint64(buf[1:9], uint64(v)) + enc = buf[:9] + + case float32: + buf[0] = 'f' + binary.BigEndian.PutUint32(buf[1:5], math.Float32bits(v)) + enc = buf[:5] + + case float64: + buf[0] = 'd' + binary.BigEndian.PutUint64(buf[1:9], math.Float64bits(v)) + enc = buf[:9] + + case Decimal: + buf[0] = 'D' + buf[1] = byte(v.Scale) + binary.BigEndian.PutUint32(buf[2:6], uint32(v.Value)) + enc = buf[:6] + + case string: + buf[0] = 'S' + binary.BigEndian.PutUint32(buf[1:5], uint32(len(v))) + enc = append(buf[:5], []byte(v)...) 
+ + case []interface{}: // field-array + buf[0] = 'A' + + sec := new(bytes.Buffer) + for _, val := range v { + if err = writeField(sec, val); err != nil { + return + } + } + + binary.BigEndian.PutUint32(buf[1:5], uint32(sec.Len())) + if _, err = w.Write(buf[:5]); err != nil { + return + } + + if _, err = w.Write(sec.Bytes()); err != nil { + return + } + + return + + case time.Time: + buf[0] = 'T' + binary.BigEndian.PutUint64(buf[1:9], uint64(v.Unix())) + enc = buf[:9] + + case Table: + if _, err = w.Write([]byte{'F'}); err != nil { + return + } + return writeTable(w, v) + + case []byte: + buf[0] = 'x' + binary.BigEndian.PutUint32(buf[1:5], uint32(len(v))) + if _, err = w.Write(buf[0:5]); err != nil { + return + } + if _, err = w.Write(v); err != nil { + return + } + return + + case nil: + buf[0] = 'V' + enc = buf[:1] + + default: + return ErrFieldType + } + + _, err = w.Write(enc) + + return +} + +func writeTable(w io.Writer, table Table) (err error) { + var buf bytes.Buffer + + for key, val := range table { + if err = writeShortstr(&buf, key); err != nil { + return + } + if err = writeField(&buf, val); err != nil { + return + } + } + + return writeLongstr(w, string(buf.Bytes())) +} diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/.gitignore b/vendor/github.com/taskcluster/httpbackoff/v3/.gitignore new file mode 100644 index 0000000..d558e48 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +hack + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# Coverage reports +coverage.report diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/.travis.yml b/vendor/github.com/taskcluster/httpbackoff/v3/.travis.yml new file mode 100644 index 0000000..7bd4a95 --- /dev/null +++ 
b/vendor/github.com/taskcluster/httpbackoff/v3/.travis.yml @@ -0,0 +1,30 @@ +language: go + +go: + - 1.12 + +before_install: + - go get github.com/axw/gocov/gocov + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover + +script: + - go test -v -coverprofile=coverage.report + +after_script: + - go tool cover -func=coverage.report + - ${HOME}/gopath/bin/goveralls -coverprofile=coverage.report -service=travis-ci + +# currently cannot customise per user fork, see: +# https://github.com/travis-ci/travis-ci/issues/1094 +notifications: + irc: + channels: + - "irc.mozilla.org#taskcluster-bots" + on_success: change + on_failure: always + template: + - "%{repository}#%{build_number} (%{branch} - %{commit} : %{author}): %{message}" + - "Change view : %{compare_url}" + - "Build details : %{build_url}" + - "Commit message : %{commit_message}" diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/CODE_OF_CONDUCT.md b/vendor/github.com/taskcluster/httpbackoff/v3/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..498baa3 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/CODE_OF_CONDUCT.md @@ -0,0 +1,15 @@ +# Community Participation Guidelines + +This repository is governed by Mozilla's code of conduct and etiquette guidelines. +For more details, please read the +[Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/). + +## How to Report +For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page. 
+ + diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/CONTRIBUTING.md b/vendor/github.com/taskcluster/httpbackoff/v3/CONTRIBUTING.md new file mode 100644 index 0000000..3ab3a82 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/CONTRIBUTING.md @@ -0,0 +1,28 @@ +# How to Contribute + +We welcome pull requests from everyone. We do expect everyone to adhere to the [Mozilla Community Participation Guidelines][participation]. + +If you're trying to figure out what to work on, here are some places to find suitable projects: +* [Good first bugs][goodfirstbug]: these are scoped to make it easy for first-time contributors to get their feet wet with Taskcluster code. +* [Mentored bugs][bugsahoy]: these are slightly more involved projects that may require insight or guidance from someone on the Taskcluster team. +* [Full list of open issues][issues]: everything else + +If the project you're interested in working on isn't covered by a bug or issue, or you're unsure about how to proceed on an existing issue, it's a good idea to talk to someone on the Taskcluster team before you go too far down a particular path. You can find us in the #taskcluster channel on [Mozilla's IRC server][irc] to discuss. You can also simply add a comment to the issue or bug. + +Once you've found an issue to work on and written a patch, submit a pull request. Some things that will increase the chance that your pull request is accepted: + +* Follow our [best practices][bestpractices]. +* This includes [writing or updating tests][testing]. +* Write a [good commit message][commit]. + +Welcome to the team! 
+ +[participation]: https://www.mozilla.org/en-US/about/governance/policies/participation/ +[issues]: ../../issues +[bugsahoy]: https://www.joshmatthews.net/bugsahoy/?taskcluster=1 +[goodfirstbug]: http://www.joshmatthews.net/bugsahoy/?taskcluster=1&simple=1 +[irc]: https://wiki.mozilla.org/IRC +[bestpractices]: https://docs.taskcluster.net/docs/manual/design/devel/best-practices +[testing]: https://docs.taskcluster.net/docs/manual/design/devel/best-practices/testing +[commit]: https://docs.taskcluster.net/docs/manual/design/devel/best-practices/commits + diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/LICENSE b/vendor/github.com/taskcluster/httpbackoff/v3/LICENSE new file mode 100644 index 0000000..a612ad9 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/README.md b/vendor/github.com/taskcluster/httpbackoff/v3/README.md new file mode 100644 index 0000000..885a838 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/README.md @@ -0,0 +1,119 @@ + + +# httpbackoff + +[![Build Status](https://travis-ci.org/taskcluster/httpbackoff.svg?branch=master)](https://travis-ci.org/taskcluster/httpbackoff) +[![GoDoc](https://godoc.org/github.com/taskcluster/httpbackoff?status.svg)](https://godoc.org/github.com/taskcluster/httpbackoff) +[![Coverage Status](https://coveralls.io/repos/taskcluster/httpbackoff/badge.svg?branch=master&service=github)](https://coveralls.io/github/taskcluster/httpbackoff?branch=master) +[![License](https://img.shields.io/badge/license-MPL%202.0-orange.svg)](http://mozilla.org/MPL/2.0) + +Automatic http retries for intermittent failures, with exponential backoff, +based on https://github.com/cenkalti/backoff. + +The reason for a separate library, is that this library handles http status +codes to know whether to retry or not. HTTP codes in range 500-599 are +retried. Connection failures are also retried. Status codes 400-499 are +considered permanent errors and are not retried. + +The Retry function performs the http request and retries if temporary errors +occur. It takes a single parameter as its input - a function to perform the +http request. This function must return `(resp *http.Response, tempError error, +permError error)` where `tempError` must be non-nil if a temporary error occurs +(e.g. dropped connection), and `permError` must be non-nil if an error occurs +that does not warrant retrying the request (e.g. 
badly formed url). + +For example, the following code that is not using retries: + +```go +res, err := http.Get("http://www.google.com/robots.txt") +``` + +can be rewritten as: + +```go +res, attempts, err := httpbackoff.Retry(func() (*http.Response, error, error) { + resp, err := http.Get("http://www.google.com/robots.txt") + // assume all errors are temporary + return resp, err, nil +}) +``` + +Please note the additional return value `attempts` is an `int` specifying how +many http calls were made (i.e. = 1 if no retries, otherwise > 1). + +The go http package has 9 functions that return `(*http.Reponse, error)` that +can potentially be retried: + +* http://golang.org/pkg/net/http/#Client.Do +* http://golang.org/pkg/net/http/#Client.Get +* http://golang.org/pkg/net/http/#Client.Head +* http://golang.org/pkg/net/http/#Client.Post +* http://golang.org/pkg/net/http/#Client.PostForm +* http://golang.org/pkg/net/http/#Get +* http://golang.org/pkg/net/http/#Head +* http://golang.org/pkg/net/http/#Post +* http://golang.org/pkg/net/http/#PostForm + +To simplify using these functions, 9 utility functions have been written that +wrap these. Therefore you can simplify this example above further with: + +```go +res, _, err := httpbackoff.Get("http://www.google.com/robots.txt") +``` + +## Configuring back off settings + +To use cusom back off settings (for example, in testing, you might want to fail quickly), instead of calling the package functions, you can call methods of HTTPRetryClient with the same name: + +```go +package main + +import ( + "log" + "net/http/httputil" + "time" + + "github.com/cenkalti/backoff" + "github.com/taskcluster/httpbackoff" +) + +func main() { + // Note, you only need to create a client if you want to customise + // the back off settings. In this example, we want to, but if you + // wish to use the reasonable default settings, no need to do this. 
+ retryClient := httpbackoff.Client{ + BackOffSettings: &backoff.ExponentialBackOff{ + InitialInterval: 1 * time.Millisecond, + RandomizationFactor: 0.2, + Multiplier: 1.2, + MaxInterval: 5 * time.Millisecond, + MaxElapsedTime: 20 * time.Millisecond, + Clock: backoff.SystemClock, + }, + } + + res, _, err := retryClient.Get("http://www.google.com/robots.txt") + if err != nil { + log.Fatalf("%s", err) + } + data, err := httputil.DumpResponse(res, true) + if err != nil { + log.Fatalf("%s", err) + } + log.Print(string(data)) +} +``` + +## Testing + +The package has tests, which run in travis. See http://travis-ci.org/taskcluster/httpbackoff. + +## Concurrency + +As far as I am aware, there is nothing in this library that prevents it from +being used concurrently. Please let me know if you find any problems! + +## Contributing +Contributions are welcome. Please fork, and issue a Pull Request back with an +explanation of your changes. Also please include tests for any functional +changes. diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/go.mod b/vendor/github.com/taskcluster/httpbackoff/v3/go.mod new file mode 100644 index 0000000..7b45b0a --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/go.mod @@ -0,0 +1,5 @@ +module github.com/taskcluster/httpbackoff/v3 + +require github.com/cenkalti/backoff/v3 v3.0.0 + +go 1.12 diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/go.sum b/vendor/github.com/taskcluster/httpbackoff/v3/go.sum new file mode 100644 index 0000000..8d1b4d2 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/go.sum @@ -0,0 +1,4 @@ +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= diff --git 
a/vendor/github.com/taskcluster/httpbackoff/v3/httpbackoff.go b/vendor/github.com/taskcluster/httpbackoff/v3/httpbackoff.go new file mode 100644 index 0000000..6b7dbe8 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/httpbackoff.go @@ -0,0 +1,287 @@ +// This package provides exponential backoff support for making HTTP requests. +// +// It uses the github.com/cenkalti/backoff algorithm. +// +// Network failures and HTTP 5xx status codes qualify for retries. +// +// HTTP calls that return HTTP 4xx status codes do not get retried. +// +// If the last HTTP request made does not result in a 2xx HTTP status code, an +// error is returned, together with the data. +// +// There are several utility methods that wrap the standard net/http package +// calls. +// +// Any function that takes no arguments and returns (*http.Response, error) can +// be retried using this library's Retry function. +// +// The methods in this library should be able to run concurrently in multiple +// go routines. +// +// Example Usage +// +// Consider this trivial HTTP GET request: +// +// res, err := http.Get("http://www.google.com/robots.txt") +// +// This can be rewritten as follows, enabling automatic retries: +// +// res, attempts, err := httpbackoff.Get("http://www.google.com/robots.txt") +// +// The variable attempts stores the number of http calls that were made (one +// plus the number of retries). +package httpbackoff + +import ( + "bufio" + "bytes" + "log" + "net/http" + "net/http/httputil" + "net/url" + "strconv" + "time" + + "github.com/cenkalti/backoff/v3" +) + +var defaultClient Client = Client{ + BackOffSettings: backoff.NewExponentialBackOff(), +} + +type Client struct { + BackOffSettings *backoff.ExponentialBackOff +} + +// Any non 2xx HTTP status code is considered a bad response code, and will +// result in a BadHttpResponseCode. 
+type BadHttpResponseCode struct { + HttpResponseCode int + Message string +} + +// Returns an error message for this bad HTTP response code +func (err BadHttpResponseCode) Error() string { + return err.Message +} + +// Retry is the core library method for retrying http calls. +// +// httpCall should be a function that performs the http operation, and returns +// (resp *http.Response, tempError error, permError error). Errors that should +// cause retries should be returned as tempError. Permanent errors that should +// not result in retries should be returned as permError. Retries are performed +// using the exponential backoff algorithm from the github.com/cenkalti/backoff +// package. Retry automatically treats HTTP 5xx status codes as a temporary +// error, and any other non-2xx HTTP status codes as a permanent error. Thus +// httpCall function does not need to handle the HTTP status code of resp, +// since Retry will take care of it. +// +// Concurrent use of this library method is supported. +func (httpRetryClient *Client) Retry(httpCall func() (resp *http.Response, tempError error, permError error)) (*http.Response, int, error) { + var tempError, permError error + var response *http.Response + attempts := 0 + doHttpCall := func() error { + response, tempError, permError = httpCall() + attempts += 1 + if tempError != nil { + return tempError + } + if permError != nil { + return nil + } + // only call this if there is a non 2xx response + body := func(response *http.Response) string { + // this is a no-op + raw, err := httputil.DumpResponse(response, true) + if err == nil { + return string(raw) + } + return "" + } + // now check if http response code is such that we should retry [500, 600)... 
+ if respCode := response.StatusCode; respCode/100 == 5 { + return BadHttpResponseCode{ + HttpResponseCode: respCode, + Message: "(Intermittent) HTTP response code " + strconv.Itoa(respCode) + "\n" + body(response), + } + } + // now check http response code is ok [200, 300)... + if respCode := response.StatusCode; respCode/100 != 2 { + permError = BadHttpResponseCode{ + HttpResponseCode: respCode, + Message: "(Permanent) HTTP response code " + strconv.Itoa(respCode) + "\n" + body(response), + } + return nil + } + return nil + } + + // Make HTTP API calls using an exponential backoff algorithm... + b := backoff.ExponentialBackOff(*httpRetryClient.BackOffSettings) + backoff.RetryNotify(doHttpCall, &b, func(err error, wait time.Duration) { + log.Printf("Error: %s", err) + }) + + switch { + case permError != nil: + return response, attempts, permError + case tempError != nil: + return response, attempts, tempError + default: + return response, attempts, nil + } +} + +// Retry works the same as HTTPRetryClient.Retry, but uses the default exponential back off settings +func Retry(httpCall func() (resp *http.Response, tempError error, permError error)) (*http.Response, int, error) { + return defaultClient.Retry(httpCall) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#Get where attempts is the number of http calls made (one plus number of retries). +func (httpRetryClient *Client) Get(url string) (resp *http.Response, attempts int, err error) { + return httpRetryClient.Retry(func() (*http.Response, error, error) { + resp, err := http.Get(url) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// Get works the same as HTTPRetryClient.Get, but uses the default exponential back off settings +func Get(url string) (resp *http.Response, attempts int, err error) { + return defaultClient.Get(url) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#Head where attempts is the number of http calls made (one plus number of retries). 
+func (httpRetryClient *Client) Head(url string) (resp *http.Response, attempts int, err error) { + return httpRetryClient.Retry(func() (*http.Response, error, error) { + resp, err := http.Head(url) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// Head works the same as HTTPRetryClient.Head, but uses the default exponential back off settings +func Head(url string) (resp *http.Response, attempts int, err error) { + return defaultClient.Head(url) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#Post where attempts is the number of http calls made (one plus number of retries). +func (httpRetryClient *Client) Post(url string, bodyType string, body []byte) (resp *http.Response, attempts int, err error) { + return httpRetryClient.Retry(func() (*http.Response, error, error) { + resp, err := http.Post(url, bodyType, bytes.NewBuffer(body)) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// Post works the same as HTTPRetryClient.Post, but uses the default exponential back off settings +func Post(url string, bodyType string, body []byte) (resp *http.Response, attempts int, err error) { + return defaultClient.Post(url, bodyType, body) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#PostForm where attempts is the number of http calls made (one plus number of retries). 
+func (httpRetryClient *Client) PostForm(url string, data url.Values) (resp *http.Response, attempts int, err error) {
+	return httpRetryClient.Retry(func() (*http.Response, error, error) {
+		resp, err := http.PostForm(url, data)
+		// assume all errors should result in a retry
+		return resp, err, nil
+	})
+}
+
+// PostForm works the same as HTTPRetryClient.PostForm, but uses the default exponential back off settings
+func PostForm(url string, data url.Values) (resp *http.Response, attempts int, err error) {
+	return defaultClient.PostForm(url, data)
+}
+
+// Retry wrapper for http://golang.org/pkg/net/http/#Client.Do where attempts is the number of http calls made (one plus number of retries).
+func (httpRetryClient *Client) ClientDo(c *http.Client, req *http.Request) (resp *http.Response, attempts int, err error) {
+	rawReq, err := httputil.DumpRequestOut(req, true)
+	// fatal
+	if err != nil {
+		return nil, 0, err
+	}
+	return httpRetryClient.Retry(func() (*http.Response, error, error) {
+		newReq, err := http.ReadRequest(bufio.NewReader(bytes.NewBuffer(rawReq)))
+		// NOTE(review): the error must be checked before newReq is dereferenced;
+		// otherwise a failed ReadRequest causes a nil-pointer panic below.
+		if err != nil {
+			return nil, nil, err // fatal
+		}
+		newReq.RequestURI = ""
+		newReq.URL = req.URL
+		// If the original request doesn't explicitly set Accept-Encoding, then
+		// the go standard library will add it, and allow gzip compression, and
+		// magically unzip the response transparently. This wouldn't be too
+		// much of a problem, except that if the header is explicitly set, then
+		// the standard library won't automatically unzip the response. This is
+		// arguably a bug in the standard library but we'll work around it by
+		// checking this specific condition.
+		if req.Header.Get("Accept-Encoding") == "" {
+			newReq.Header.Del("Accept-Encoding")
+		}
+		resp, err := c.Do(newReq)
+		// assume all errors should result in a retry
+		return resp, err, nil
+	})
+}
+
+// ClientDo works the same as HTTPRetryClient.ClientDo, but uses the default exponential back off settings
+func ClientDo(c *http.Client, req *http.Request) (resp *http.Response, attempts int, err error) {
+	return defaultClient.ClientDo(c, req)
+}
+
+// Retry wrapper for http://golang.org/pkg/net/http/#Client.Get where attempts is the number of http calls made (one plus number of retries).
+func (httpRetryClient *Client) ClientGet(c *http.Client, url string) (resp *http.Response, attempts int, err error) {
+	return httpRetryClient.Retry(func() (*http.Response, error, error) {
+		resp, err := c.Get(url)
+		// assume all errors should result in a retry
+		return resp, err, nil
+	})
+}
+
+// ClientGet works the same as HTTPRetryClient.ClientGet, but uses the default exponential back off settings
+func ClientGet(c *http.Client, url string) (resp *http.Response, attempts int, err error) {
+	return defaultClient.ClientGet(c, url)
+}
+
+// Retry wrapper for http://golang.org/pkg/net/http/#Client.Head where attempts is the number of http calls made (one plus number of retries).
+func (httpRetryClient *Client) ClientHead(c *http.Client, url string) (resp *http.Response, attempts int, err error) { + return httpRetryClient.Retry(func() (*http.Response, error, error) { + resp, err := c.Head(url) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// ClientHead works the same as HTTPRetryClient.ClientHead, but uses the default exponential back off settings +func ClientHead(c *http.Client, url string) (resp *http.Response, attempts int, err error) { + return defaultClient.ClientHead(c, url) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#Client.Post where attempts is the number of http calls made (one plus number of retries). +func (httpRetryClient *Client) ClientPost(c *http.Client, url string, bodyType string, body []byte) (resp *http.Response, attempts int, err error) { + return httpRetryClient.Retry(func() (*http.Response, error, error) { + resp, err := c.Post(url, bodyType, bytes.NewBuffer(body)) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// ClientPost works the same as HTTPRetryClient.ClientPost, but uses the default exponential back off settings +func ClientPost(c *http.Client, url string, bodyType string, body []byte) (resp *http.Response, attempts int, err error) { + return defaultClient.ClientPost(c, url, bodyType, body) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#Client.PostForm where attempts is the number of http calls made (one plus number of retries). 
+func (httpRetryClient *Client) ClientPostForm(c *http.Client, url string, data url.Values) (resp *http.Response, attempts int, err error) { + return httpRetryClient.Retry(func() (*http.Response, error, error) { + resp, err := c.PostForm(url, data) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// ClientPostForm works the same as HTTPRetryClient.ClientPostForm, but uses the default exponential back off settings +func ClientPostForm(c *http.Client, url string, data url.Values) (resp *http.Response, attempts int, err error) { + return defaultClient.ClientPostForm(c, url, data) +} diff --git a/vendor/github.com/taskcluster/jsonschema2go/LICENSE b/vendor/github.com/taskcluster/jsonschema2go/LICENSE new file mode 100644 index 0000000..a612ad9 --- /dev/null +++ b/vendor/github.com/taskcluster/jsonschema2go/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. 
"Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. 
+ +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/taskcluster/jsonschema2go/text/text.go b/vendor/github.com/taskcluster/jsonschema2go/text/text.go new file mode 100644 index 0000000..30c4817 --- /dev/null +++ b/vendor/github.com/taskcluster/jsonschema2go/text/text.go @@ -0,0 +1,239 @@ +// Package text contains utility functions for manipulating raw text strings +package text + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" + + "github.com/fatih/camelcase" +) + +// See https://golang.org/ref/spec#Keywords +var reservedKeyWords = map[string]bool{ + "break": true, + "case": true, + "chan": true, + "const": true, + "continue": true, + "default": true, + "defer": true, + "else": true, + "fallthrough": true, + "for": true, + "func": true, + "go": true, + "goto": true, + "if": true, + "import": true, + "interface": true, + "map": true, + "package": true, + "range": true, + "return": true, + "select": true, + "struct": true, + "switch": true, + "type": true, + "var": true, +} + +// taken from https://github.com/golang/lint/blob/32a87160691b3c96046c0c678fe57c5bef761456/lint.go#L702 +var commonInitialisms = map[string]bool{ + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTP": true, + "HTTPS": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "OS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XSRF": true, + "XSS": true, +} 
+ +// Indent indents a block of text with an indent string. It does this by +// placing the given indent string at the front of every line, except on the +// last line, if the last line has no characters. This special treatment +// simplifies the generation of nested text structures. +func Indent(text, indent string) string { + if text == "" { + return text + } + if text[len(text)-1:] == "\n" { + result := "" + for _, j := range strings.Split(text[:len(text)-1], "\n") { + result += indent + j + "\n" + } + return result + } + result := "" + for _, j := range strings.Split(strings.TrimRight(text, "\n"), "\n") { + result += indent + j + "\n" + } + return result[:len(result)-1] +} + +// Underline returns the provided text together with a new line character and a +// line of "=" characters whose length is equal to the maximum line length in +// the provided text, followed by a final newline character. +func Underline(text string) string { + var maxlen int + for _, j := range strings.Split(text, "\n") { + if len(j) > maxlen { + maxlen = len(j) + } + } + return text + "\n" + strings.Repeat("=", maxlen) + "\n" +} + +// Returns a string of the same length, filled with "*"s. +func StarOut(text string) string { + return strings.Repeat("*", len(text)) +} + +// GoIdentifierFrom provides a mechanism to mutate an arbitrary descriptive +// string (name) into a Go identifier (variable name, function name, etc) that +// e.g. can be used in generated code, taking into account a blacklist of names +// that should not be used, plus the blacklist of the go language reserved key +// words (https://golang.org/ref/spec#Keywords), in order to guarantee that a +// new name is created which will not conflict with an existing type. 
+// +// Identifier syntax: https://golang.org/ref/spec#Identifiers +// +// Strategy to convert arbitrary unicode string to a valid identifier: +// +// 1) Ensure name is valid UTF-8; if not, replace it with empty string +// +// 2) Split name into arrays of allowed runes (words), by considering a run of +// disallowed unicode characters to act as a separator, where allowed runes +// include unicode letters, unicode numbers, and '_' character (disallowed +// runes are discarded) +// +// 3) Split words further into sub words, by decomposing camel case words as +// per https://github.com/fatih/camelcase#usage-and-examples +// +// 4) Designate the case of all subwords of all words to be uppercase, with the +// exception of the first subword of the first word, which should be lowercase +// if exported is false, otherwise uppercase +// +// 5) For each subword of each word, adjust as follows: if designated as +// lowercase, lowercase all characters of the subword; if designated as +// uppercase, then if recognised as a common "initialism", then uppercase all +// the characters of the subword, otherwise uppercase only the first character +// of the subword. Common "Initialisms" are defined as per: +// https://github.com/golang/lint/blob/32a87160691b3c96046c0c678fe57c5bef761456/lint.go#L702 +// +// 6) Rejoin subwords to form a single word +// +// 7) Rejoin words into a single string +// +// 8) If the string starts with a number, add a leading `_` +// +// 9) If the string is the empty string or "_", set as "Identifier" +// +// 10) If the resulting identifier is in the given blacklist, or the list of +// reserved key words (https://golang.org/ref/spec#Keywords), append the lowest +// integer possible, >= 1, that results in no blacklist conflict +// +// 11) Add the new name to the given blacklist +// +// Note, the `map[string]bool` construction is simply a mechanism to implement +// set semantics; a value of `true` signifies inclusion in the set. 
+// Non-existence is equivalent to existence with a value of `false`; therefore +// it is recommended to only store `true` values. +func GoIdentifierFrom(name string, exported bool, blacklist map[string]bool) (identifier string) { + if !utf8.ValidString(name) { + name = "" + } + for i, word := range strings.FieldsFunc( + name, + func(c rune) bool { + return !unicode.IsLetter(c) && !unicode.IsNumber(c) && c != '_' + }, + ) { + caseAdaptedWord := "" + for j, subWord := range camelcase.Split(word) { + caseAdaptedWord += fixCase(subWord, i == 0 && j == 0 && !exported) + } + identifier += caseAdaptedWord + } + + if strings.IndexFunc( + identifier, + func(c rune) bool { + return unicode.IsNumber(c) + }, + ) == 0 { + identifier = "_" + identifier + } + + if identifier == "" || identifier == "_" { + identifier = "Identifier" + } + + // If name already exists, add an integer suffix to name. Start with "1" and increment + // by 1 until an unused name is found. Example: if name FooBar was generated four times + // , the first instance would be called FooBar, then the next would be FooBar1, the next + // FooBar2 and the last would be assigned a name of FooBar3. We do this to guarantee we + // don't use duplicate names for different logical entities. + for k, baseName := 1, identifier; blacklist[identifier] || reservedKeyWords[identifier]; { + identifier = fmt.Sprintf("%v%v", baseName, k) + k++ + } + blacklist[identifier] = true + return +} + +func fixCase(word string, makeLower bool) string { + if word == "" { + return "" + } + if makeLower { + return strings.ToLower(word) + } + upper := strings.ToUpper(word) + if commonInitialisms[upper] { + return upper + } + firstRune, size := utf8.DecodeRuneInString(word) + remainingString := word[size:] + return string(unicode.ToUpper(firstRune)) + remainingString +} + +// Returns the indefinite article (in English) for a the given noun, which is +// 'an' for nouns beginning with a vowel, otherwise 'a'. 
+func IndefiniteArticle(noun string) string { + if strings.ContainsRune("AEIOUaeiou", rune(noun[0])) { + return "an" + } + return "a" +} diff --git a/vendor/github.com/taskcluster/pulse-go/LICENSE b/vendor/github.com/taskcluster/pulse-go/LICENSE new file mode 100644 index 0000000..a612ad9 --- /dev/null +++ b/vendor/github.com/taskcluster/pulse-go/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. 
"Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/taskcluster/pulse-go/pulse/doc.go b/vendor/github.com/taskcluster/pulse-go/pulse/doc.go new file mode 100644 index 0000000..7a86428 --- /dev/null +++ b/vendor/github.com/taskcluster/pulse-go/pulse/doc.go @@ -0,0 +1,261 @@ +// Package pulse provides operations for consuming mozilla pulse messages (see +// https://pulse.mozilla.org/). +// +// For users that are interested in publishing messages, or having lower level +// control of the amqp interactions with pulse, take a look at +// http://godoc.org/github.com/streadway/amqp. This library is built on top of +// the amqp package. +// +// For a user that is simply interesting in consuming pulse messages without +// wishing to acquire a detailed understanding of how pulse.mozilla.org has +// been designed, or how AMQP 0.9.1 works, this client provides basic utility +// methods to get you started off quickly. +// +// Please note that parent package "github.com/taskcluster/pulse-go" provides a +// very simple command line interface into this library too, which can be +// called directly from a shell, for example, so that the user requires no go +// programming expertise, and can directly write e.g. shell scripts that +// process pulse messages. +// +// To get started, we have created an example program which uses this library. +// The source code for this example is available at +// https://github.com/taskcluster/pulse-go/blob/master/pulsesniffer/pulsesniffer.go. +// Afterwards, we will describe how it works. Do not worry if none of it makes +// sense now. By the end of this overview it will all be explained. 
+// +// // Package pulsesniffer provides a simple example program that listens to some +// // real world pulse messages. +// package main +// +// import ( +// "fmt" +// "github.com/taskcluster/pulse-go/pulse" +// "github.com/streadway/amqp" +// ) +// +// func main() { +// // Passing all empty strings: +// // empty user => use PULSE_USERNAME env var +// // empty password => use PULSE_PASSWORD env var +// // empty url => connect to production +// conn := pulse.NewConnection("", "", "") +// conn.Consume( +// "taskprocessing", // queue name +// func(message interface{}, delivery amqp.Delivery) { // callback function to pass messages to +// fmt.Println("Received from exchange " + delivery.Exchange + ":") +// fmt.Println(string(delivery.Body)) +// fmt.Println("") +// delivery.Ack(false) // acknowledge message *after* processing +// }, +// 1, // prefetch 1 message at a time +// false, // don't auto-acknowledge messages +// pulse.Bind( // routing key and exchange to get messages from +// "*.*.*.*.*.*.gaia.#", +// "exchange/taskcluster-queue/v1/task-defined"), +// pulse.Bind( // another routing key and exchange to get messages from +// "*.*.*.*.*.aws-provisioner.#", +// "exchange/taskcluster-queue/v1/task-running")) +// conn.Consume( // a second workflow to manage concurrently +// "", // empty name implies anonymous queue +// func(message interface{}, delivery amqp.Delivery) { // simpler callback than before +// fmt.Println("Buildbot message received") +// fmt.Println("") +// }, +// 1, // prefetch +// true, // auto acknowledge, so no need to call delivery.Ack +// pulse.Bind( // routing key and exchange to get messages from +// "#", // get *all* normalized buildbot messages +// "exchange/build/normalized")) +// // wait forever +// forever := make(chan bool) +// <-forever +// } +// The first thing we need to do is provide connection details for connecting +// to the pulse server, which we do like this: +// +// conn := pulse.NewConnection("", "", "") +// +// In this example, 
the provided strings (username, password, url) have all +// been left empty. This is because by default, if you provide no username or +// password, the NewConnection function will inspect environment variables +// PULSE_USERNAME and PULSE_PASSWORD, and an empty url will trigger the library +// to use the current production url. Another example call could be: +// +// conn := pulse.NewConnection("guest", "guest", "amqp://localhost:5672/") +// +// Typically we would set the username and password credentials via environment +// variables to avoid hardcoding them in the go code. For more details about +// managing the username, password and amqp url, see the documentation for the +// NewConnection function. +// +// A call to NewConnection does not actually create a connection to the pulse +// server, it simply prepares the data that will be needed when we finally make +// the connection. Users and passwords can be created by going to the Pulse +// Guardian (https://pulse.mozilla.org) and registering an account. +// +// You will see in the code above, that after creating a connection, there is +// only one more method we call - Consume - which we use for processing +// messages. This is the heart of the pulse library, and where all of the +// action happens. +// +// In pulse, all messages are delivered to "topic exchanges" and the way to +// receive these messages is to request the ones you are interested in are +// copied onto a queue you can read from, and then to read them from the queue. +// This is called binding. To bind messages from an exchange to a queue, you +// specify the name of the exchange you want to receive messages from, and a +// matching criteria to define the ones you want. The matching process is +// handled by routing keys, which will now be explained. +// +// Each message that arrives on an exchange has a "routing key" signature. The +// routing key comprises of several fields. 
For an example, see: +// https://docs.taskcluster.net/reference/platform/queue/exchanges#taskDefined. +// The fields are delimited by dots, and therefore the routing key of a message +// is represented as a '.' delimited string. In order to select the messages +// on an exchange that you wish to receive, you specify a matching routing key. +// For each field of the routing key, you can either match against a specific +// value, or match all entries with the '*' wildcard. Above, we specified the +// following routing key and exchange: +// +// pulse.Bind( // routing key and exchange to get messages from +// "*.*.*.*.*.*.gaia.#", +// "exchange/taskcluster-queue/v1/task-defined"), +// +// This would match all messages on the exchange +// "exchange/taskcluster-queue/v1/task-defined" which have a workerType of +// "gaia" (see the taskDefined link above). Notice also the '#' at the end of +// the string. This means "match all remaining fields" and can be used to match +// whatever comes after. +// +// To see the list of available exchanges on pulse, visit +// https://wiki.mozilla.org/Auto-tools/Projects/Pulse/Exchanges. +// +// After deciding which exchanges you are interested in, you need a queue to +// have them copied onto. This is also handled by the Consume method, with the +// first argument being the name of the queue to use. You will notice above +// there are two types of queues we create: named queues, and unnamed queues: +// +// conn.Consume( +// "taskprocessing", // queue name +// +// conn.Consume( // a second workflow to manage concurrently +// "", // empty name implies anonymous queue +// +// To understand the difference, first we need to explain the different +// scenarios in which you might want to use them. +// +// Scenario 1) You have one client reading from the queue, and when you +// disconnect, you don't want your queue to receive any more messages +// +// Scenario 2) you have multiple clients that want to feed from the same queue +// (e.g. 
when multiple workers can perform the same task, and whichever one +// pops the message off the queue first should process it) +// +// Scenario 3) you only have a single client reading from the queue, but if you +// go offline (crash, network interrupts etc) then you want pulse to keep +// updating your queue so your missed messages are there when you get back. +// +// In scenario 1 above, your client only uses the queue for the scope of the +// connection, and as soon as it disconnects, does not require the queue any +// further. In this case, an unnamed queue can be created, by passing "" as the +// queue name. When the connection closes, the AMQP server will automatically +// delete the queue. +// +// In scenarios 2 it is useful to have a friendly name for the queue that can +// be shared by all the clients using it. The queue also should not be deleted +// when one client disconnects, it needs to live indefinitely. By providing a +// name for the queue, this signifies to the pulse library, that the queue +// should persist after a disconnect, and pulse should continue to populate the +// queue, even if no pulse clients are connected to consume the messages. +// Please note eventually the Pulse Guardian will delete your queue if you +// leave it collecting messages without consuming them. +// +// Scenario 3 is essentially the same as scenario 2 but with one consumer only. +// Again, a named queue is required. +// +// So, we're nearly done now. We now have a means to consume messages, by +// calling the Consume method, and specifying a queue name, some bindings of +// exchanges and routing keys, but how to actually process messages arriving on +// the queue? +// +// You will notice the Consume method takes a callback function. This can be an +// inline function, or point to any available function in your go code. You +// simply need to have a function that accepts an amqp.Delivery input, and pass +// it into the Consume method. 
Above, we did it like this: +// +// func(message interface{}, delivery amqp.Delivery) { // callback function to pass messages to +// fmt.Println("Received from exchange " + delivery.Exchange + ":") +// fmt.Println(string(delivery.Body)) +// fmt.Println("") +// delivery.Ack(false) // acknowledge message *after* processing +// }, +// +// The two parameters of the callback function we have created are the message +// object, and the delivery object. The message object is the pulse message, +// but unmarshaled into an interface{}. Since the pulse messages are all json +// messages, the pulse library unmarshals it and give you back a go object with +// its contents. Please note if you require that the json is unmarshaled into +// something more specific than interface{}, such as a custom class, this is +// possible, and will be explained in the next paragraph. The other parameter, +// the delivery object, is an underlying amqp library type, which gives you +// access to some meta data for the message. Please see +// http://godoc.org/github.com/streadway/amqp#Delivery for more information. +// Among other things, it provides you with delivery.Body, which is the raw +// json of the message. You can therefore choose if you want to process the raw +// json or the unmarshaled json in your callback method. +// +// You recall above that to describe the binding from an exchange to a queue +// with a given routing key, we specified pulse.Bind(routingKey, exchange) as a +// parameter of the Consume method. pulse.Bind(routingKey, exchange) returns an +// object of type Binding, where Binding is an interface. If you wish to +// unmarshal your json into something other than an interface{}, take a look at +// the Binding interface documentation +// (http://godoc.org/github.com/taskcluster/pulse-go/pulse#Binding). 
Instead of +// calling pulse.Bind(routingKey, exchange) you can provide your own Binding +// interface implementation which can enable custom handling of exchange names, +// routing keys, and unmarshaling of objects. The taskcluster go client relies +// heavily on this, for example. See +// http://godoc.org/github.com/taskcluster/taskcluster-client-go/queueevents#example-package--TaskclusterSniffer +// for inspiration. +// +// In this example above, we simply output the information we receive, and then +// acknowledge receipt of the message. But why do we need to do this? To explain, +// take a look at the remaining parameters to Consume that we pass in. There +// are two more we have not discussed yet: they are the prefetch size (how many +// messages to fetch at once), and a bool to say whether to auto-acknowledge +// messages or not. +// +// 1, // prefetch 1 message at a time +// false, // don't auto-acknowledge messages +// +// When you acknowledge a message, it gets popped off the queue. If you don't +// auto-acknowledge, and also don't manually acknowledge, your queue is going +// to grow until it gets deleted by Pulse Guardian, so better to acknowledge +// those messages! Auto-acknowledge happens when you receive the message; if +// you crash after receiving it but before processing it, you may have a +// problem. If it is important not to lose messages in such a scenario, you can +// acknowledge manually *after* processing the message. See above: +// +// delivery.Ack(false) // acknowledge message *after* processing +// +// This is "more work" for you to do, but guarantees that you don't lose +// messages. To handle situation of crashing after processing, but before +// acknowledging, having an idempotent message processing function (the +// callback) should help avoid the problem of processing a message twice. 
+// +// Please note the Consume method will take care of connecting to the pulse +// server (if no connection has yet been established), creating an AMQP +// channel, creating or connecting to an existing queue, binding it to all the +// exchanges and routing keys that you specify, and spawning a dedicated go +// routine to process the messages from this queue and feed them back to the +// callback method you provide. +// +// The client is implemented in such a way that a new AMQP channel is created +// for each queue that you consume, and that a separate go routine handles +// calling the callback function you specify. This means you can take advantage +// of go's built in concurrency support, and call the Consume method as many +// times as you wish. +// +// The aim of this library is to shield users from this lower-level resource +// management, and provide a simple interface in order to quickly and easily +// develop components that can interact with pulse. +package pulse diff --git a/vendor/github.com/taskcluster/pulse-go/pulse/pulse.go b/vendor/github.com/taskcluster/pulse-go/pulse/pulse.go new file mode 100644 index 0000000..d1e8388 --- /dev/null +++ b/vendor/github.com/taskcluster/pulse-go/pulse/pulse.go @@ -0,0 +1,357 @@ +package pulse + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "os" + "regexp" + + "github.com/pborman/uuid" + "github.com/streadway/amqp" +) + +// Utility method used for checking an error condition, and failing with a given +// error message if the error is not nil. msg should contain a description of +// what activity could not be performed as required. +func (err PulseError) Error() string { + msg := err.Message + lle := err.LowerLevelError + if msg != "" { + if lle != nil { + return fmt.Sprintf("%s: %s", msg, lle) + } + return fmt.Sprintf("%s", msg) + } + if lle != nil { + return fmt.Sprintf("Pulse library error occurred caused by:\n%s", lle) + } + return "Unknown Pulse Library error has occurred! No information available! 
:D" +} + +// Utility function for generating a PulseError +func Error(err error, msg string) PulseError { + return PulseError{ + Message: msg, + LowerLevelError: err, + } +} + +// PulseError is used for describing problems that occurred when interacting +// with Pulse, caused by a lower-level error +type PulseError struct { + Message string + LowerLevelError error +} + +// PulseQueue manages an underlying AMQP queue, and provides methods for +// closing, deleting, pausing and resuming queues. +type PulseQueue struct { +} + +// Connection manages the underlying AMQP connection, and provides an interface +// for performing further actions, such as creating a queue. +type Connection struct { + User string + Password string + URL string + AMQPConn *amqp.Connection + connected bool + closedAlert chan amqp.Error +} + +// match applies the regular expression regex to string text, and only replaces +// with $1 if there is a match, otherwise if no match, returns an empty string +func match(regex, text string) string { + if matched, _ := regexp.MatchString(regex, text); matched { + return regexp.MustCompile(regex).ReplaceAllString(text, "$1") + } + return "" +} + +// NewConnection prepares a Connection object with a username, password and an +// AMQP URL, but does not actually make an outbound connection to the service. +// An actual network connection will be made the first time the Consume method +// is called. +// +// The logic for deriving the AMQP url is as follows: +// +// If the provided amqpUrl is a non-empty string, it will be used to set the +// AMQP URL. Otherwise, production will be used +// ("amqps://pulse.mozilla.org:5671") +// +// The pulse user is determined as follows: +// +// If the provided pulseUser is a non-empty string, it will be used for AMQP +// connection user. Otherwise, if the amqlUrl contains a user, it will be +// used. Otherwise, if environment variable PULSE_USERNAME is non empty, it +// will be used. Otherwise, the value "guest" will be used. 
+// +// The pulse password is determined as follows: +// +// If the provided pulsePassword is a non-empty string, it will be used for +// AMQP connection password. Otherwise, if the amqlUrl contains a password, it +// will be used. Otherwise, if environment variable PULSE_PASSWORD is non +// empty, it will be used. Otherwise, the value "guest" will be used. +// +// Finally, the AMQP url is adjusted, by stripping out any user/password +// contained inside it, and then embedding the derived username and password +// above. +// +// Typically, a call to this method would look like: +// +// conn := pulse.NewConnection("", "", "") +// +// whereby the client program would export PULSE_USERNAME and PULSE_PASSWORD +// environment variables before calling the go program, and the empty url would +// signify that the client should connect to the production instance. +func NewConnection(pulseUser string, pulsePassword string, amqpUrl string) Connection { + if amqpUrl == "" { + amqpUrl = "amqps://pulse.mozilla.org:5671" + } + if pulseUser == "" { + // Regular expression to pull out username from amqp url + pulseUser = match("^.*://([^:@/]*)(:[^@]*@|@).*$", amqpUrl) + } + if pulsePassword == "" { + // Regular expression to pull out password from amqp url + pulsePassword = match("^.*://[^:@/]*:([^@]*)@.*$", amqpUrl) + } + if pulseUser == "" { + pulseUser = os.Getenv("PULSE_USERNAME") + } + if pulsePassword == "" { + pulsePassword = os.Getenv("PULSE_PASSWORD") + } + if pulseUser == "" { + pulseUser = "guest" + } + if pulsePassword == "" { + pulsePassword = "guest" + } + + // now substitute in real username and password into url... + amqpUrl = regexp.MustCompile("^(.*://)([^@/]*@|)([^@]*)(/.*|$)").ReplaceAllString(amqpUrl, "${1}"+pulseUser+":"+pulsePassword+"@${3}${4}") + + return Connection{ + User: pulseUser, + Password: pulsePassword, + URL: amqpUrl} +} + +// connect is called internally, lazily, the first time Consume is called. 
+// TODO: need to make sure this is properly synchronised. +func (c *Connection) connect() error { + var err error + c.AMQPConn, err = amqp.Dial(c.URL) + if err != nil { + return Error(err, "Failed to connect to RabbitMQ") + } + c.connected = true + return nil +} + +// Binding interface allows you to create custom types to describe exchange / +// routing key combinations. For example Binding types are generated in Task +// Cluster go client to avoid a library user referencing a non existent +// exchange, or an invalid routing key. +type Binding interface { + + // This should return a routing key string for matching pulse messages + RoutingKey() string + + // This should return the fully qualified name of the pulse exchange to + // bind messages from + ExchangeName() string + + // This should return a pointer to a new object for unmarshaling matching + // pulse messages into + NewPayloadObject() interface{} +} + +// Convenience private (unexported) type for binding a routing key/exchange +// to a queue using plain strings for describing the exchange and routing key +type simpleBinding struct { + // copy of the static routing key + rk string + // copy of the static fully qualified exchange name + en string +} + +// Convenience function for returning a Binding for the given routing key and +// exchange strings, which can be passed to the Consume method of *Connection. +// Typically this is used if you wish to refer to exchanges and routing keys +// with explicit strings, rather than generated types (e.g. Task Cluster go +// client generates custom types to avoid invalid exchange names or invalid +// routing keys). See the Consume method for more information. 
+func Bind(routingKey, exchangeName string) Binding { + return &simpleBinding{rk: routingKey, en: exchangeName} +} + +// RoutingKey() blindly returns the routing key the simpleBinding was created +// with in the Bind function above +func (s simpleBinding) RoutingKey() string { + return s.rk +} + +// ExchangeName() blindly returns the exchange name the simpleBinding was +// created with in the Bind function above +func (s simpleBinding) ExchangeName() string { + return s.en +} + +// we unmarshal into an interface{} since we don't know anything about the +// json payload +func (s simpleBinding) NewPayloadObject() interface{} { + return new(interface{}) +} + +// Consume is at the heart of the pulse library. After creating a connection +// with pulse.NewConnection(...) above, you can call the Consume method to +// register a queue, set a callback function to be called with each message +// received on the queue and bind the queue to a list of exchange / routing key +// pairs. See the package overview for a walkthrough example. A go routine will +// be spawned to take care of calling the callback function, and a new AMQP +// channel will be created behind-the-scenes to handle the processing. +// +// queueName is the name of the queue to connect to or create; leave empty for +// an anonymous queue that will get auto deleted after disconnecting, or +// provide a name for a long-lived queue. callback specifies the function to +// be called with each message that arrives. prefetch specifies how many +// messages should be read from the queue at a time. autoAck is a bool to +// specify if auto acknowledgements should be sent or not; if not +// auto-acknowledging, remember to ack / nack in your callback method. +// bindings is a variadic input of the exchange names / routing keys that you +// wish pulse to copy to your queue. 
+func (c *Connection) Consume( + queueName string, + callback func(interface{}, amqp.Delivery), + prefetch int, + autoAck bool, + bindings ...Binding, +) ( + PulseQueue, + error, +) { + pulseQueue := PulseQueue{} + + // TODO: this needs to be synchronised + if !c.connected { + c.connect() + } + + ch, err := c.AMQPConn.Channel() + if err != nil { + return pulseQueue, Error(err, "Failed to open a channel") + } + + // keep a map from exchange name to exchange object, so later we can + // unmarshal pulse messages into correct object from the exchange name + // in the amqp.Delivery object to get back to Binding, and thus to + // Binding.NewPayloadObject() + bindingLookup := make(map[string]Binding, len(bindings)) + + for i := range bindings { + err = ch.ExchangeDeclarePassive( + bindings[i].ExchangeName(), // name + "topic", // type + false, // durable + false, // auto-deleted + false, // internal + false, // no-wait + nil, // arguments + ) + if err != nil { + return pulseQueue, Error(err, "Failed to passively declare exchange "+bindings[i].ExchangeName()) + } + // bookkeeping... 
+ bindingLookup[bindings[i].ExchangeName()] = bindings[i] + } + + var q amqp.Queue + if queueName == "" { + q, err = ch.QueueDeclare( + "queue/"+c.User+"/"+uuid.New(), // name + false, // durable + // unnamed queues get deleted when disconnected + true, // delete when usused + // unnamed queues are exclusive + true, // exclusive + false, // no-wait + nil, // arguments + ) + } else { + q, err = ch.QueueDeclare( + "queue/"+c.User+"/"+queueName, // name + false, // durable + false, // delete when usused + false, // exclusive + false, // no-wait + nil, // arguments + ) + } + if err != nil { + return pulseQueue, Error(err, "Failed to declare queue") + } + + for i := range bindings { + log.Printf("Binding %s to %s with routing key %s", q.Name, bindings[i].ExchangeName(), bindings[i].RoutingKey()) + err = ch.QueueBind( + q.Name, // queue name + bindings[i].RoutingKey(), // routing key + bindings[i].ExchangeName(), // exchange + false, + nil) + if err != nil { + return pulseQueue, Error(err, "Failed to bind a queue") + } + } + + eventsChan, err := ch.Consume( + q.Name, // queue + "", // consumer + autoAck, // auto ack + false, // exclusive + false, // no local + false, // no wait + nil, // args + ) + if err != nil { + return pulseQueue, Error(err, "Failed to register a consumer") + } + + go func() { + for i := range eventsChan { + payload := i.Body + binding, ok := bindingLookup[i.Exchange] + if !ok { + panic(errors.New(fmt.Sprintf("ERROR: Message received for an unknown exchange '%v' - not sure how to process", i.Exchange))) + } + payloadObject := binding.NewPayloadObject() + err := json.Unmarshal(payload, payloadObject) + if err != nil { + fmt.Printf("Unable to unmarshal json payload into object:\nPayload:\n%v\nObject: %T\n", string(payload), payloadObject) + } + callback(payloadObject, i) + } + fmt.Println("AMQP channel closed - has the connection dropped?") + }() + return pulseQueue, nil +} + +// TODO: not yet implemented +func (pq *PulseQueue) Pause() { +} + +// TODO: 
not yet implemented +func (pq *PulseQueue) Delete() { +} + +// TODO: not yet implemented +func (pq *PulseQueue) Resume() { +} + +// TODO: not yet implemented +func (pq *PulseQueue) Close() { +} diff --git a/vendor/github.com/taskcluster/slugid-go/LICENSE b/vendor/github.com/taskcluster/slugid-go/LICENSE new file mode 100644 index 0000000..a612ad9 --- /dev/null +++ b/vendor/github.com/taskcluster/slugid-go/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. 
+ +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/taskcluster/slugid-go/slugid/slugid.go b/vendor/github.com/taskcluster/slugid-go/slugid/slugid.go new file mode 100644 index 0000000..e843339 --- /dev/null +++ b/vendor/github.com/taskcluster/slugid-go/slugid/slugid.go @@ -0,0 +1,113 @@ +// Licensed under the Mozilla Public Licence 2.0. +// https://www.mozilla.org/en-US/MPL/2.0 +// +// ************** +// * Slugid API * +// ************** +// +// @)@) +// _|_| ( ) +// _(___,`\ _,--------------._ (( /`, )) +// `==` `*-_,' O `~._ ( ( _/ | ) ) +// `, : o } `~._.~` * ', +// \ - _ O - ,' +// | ; - - " ; o / +// | O o ,-` +// \ _,-:""""""'`:-._ - . O / +// `""""""~'` `._ _,-` +// """""" + +// A go (golang) module for generating v4 UUIDs and encoding them into 22 +// character URL-safe base64 slug representation (see [RFC 4648 sec. +// 5](http://tools.ietf.org/html/rfc4648#section-5)). +// +// Slugs are url-safe base64 encoded v4 uuids, stripped of base64 `=` padding. +// +// There are two methods for generating slugs - `slugid.V4()` and +// `slugid.Nice()`. +// +// V4 Slugs +// +// The `slugid.V4()` method returns a slug from a randomly generated v4 uuid. +// +// Nice slugs +// +// The `slugid.Nice()` method returns a v4 slug which conforms to a set of +// "nice" properties. At the moment the only "nice" property is that the slug +// starts with `[A-Za-f]`, which in turn implies that the first (most +// significant) bit of its associated uuid is set to 0. +// +// The purpose of the `slugid.Nice()` method is to support having slugids which +// can be used in more contexts safely. Regular slugids can safely be used in +// urls, and for example in AMQP routing keys. 
However, slugs beginning with `-` +// may cause problems when used as command line parameters. +// +// In contrast, slugids generated by the `slugid.Nice()` method can safely be +// used as command line parameters. This comes at a cost to entropy (121 bits vs +// 122 bits for regular v4 slugs). +// +// Choosing which slug generation method to use +// +// Slug consumers should consider carefully which of these two slug generation +// methods to call. Is it more important to have maximum entropy, or to have +// slugids that do not need special treatment when used as command line +// parameters? This is especially important if you are providing a service which +// supplies slugs to unexpecting tool developers downstream, who may not realise +// the risks of using your regular v4 slugs as command line parameters, especially +// since this would arise only as an intermittent issue (one time in 64). +// +// Generated slugs take the form `[A-Za-z0-9_-]{22}`, or more precisely: +// +// `slugid.V4()` slugs conform to +// `[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]` +// +// `slugid.Nice()` slugs conform to +// `[A-Za-f][A-Za-z0-9_-]{7}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]` +// +// RFC 4122 defines the setting of 6 bits of the v4 UUID which implies v4 slugs +// provide 128 - 6 = 122 bits entropy. Due to the (un)setting of the first bit +// of "nice" slugs, nice slugs provide therefore 121 bits entropy. +package slugid + +import ( + "encoding/base64" + + "github.com/pborman/uuid" +) + +// Returns the given uuid.UUID object as a 22 character slug. This can be a +// regular v4 slug or a "nice" slug. 
+func Encode(uuid_ uuid.UUID) string { + return base64.URLEncoding.EncodeToString(uuid_)[:22] // Drop '==' padding +} + +// Returns the uuid.UUID object represented by the given v4 or "nice" slug, or +// nil if it cannot be decoded +func Decode(slug string) uuid.UUID { + uuid_, err := base64.URLEncoding.DecodeString(slug + "==") // b64 padding + if err != nil { + return nil + } + return uuid_ +} + +// Returns a randomly generated uuid v4 compliant slug +func V4() string { + return base64.URLEncoding.EncodeToString(uuid.NewRandom())[:22] // Drop '==' padding +} + +// Returns a randomly generated uuid v4 compliant slug which conforms to a set +// of "nice" properties, at the cost of some entropy. Currently this means one +// extra fixed bit (the first bit of the uuid is set to 0) which guarantees the +// slug will begin with [A-Za-f]. For example such slugs don't require special +// handling when used as command line parameters (whereas non-nice slugs may +// start with `-` which can confuse command line tools). +// +// Potentially other "nice" properties may be added in future to further +// restrict the range of potential uuids that may be generated. 
+func Nice() string { + rawBytes := uuid.NewRandom() + // unset most significant bit of first byte to ensure slug starts with [A-Za-f] + rawBytes[0] &= 0x7f + return base64.URLEncoding.EncodeToString(rawBytes)[:22] // Drop '==' padding +} diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/.eslintrc b/vendor/github.com/taskcluster/taskcluster-lib-urls/.eslintrc new file mode 100644 index 0000000..528d46d --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/.eslintrc @@ -0,0 +1,3 @@ +{ + "extends": "eslint-config-taskcluster" +} diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/.gitignore b/vendor/github.com/taskcluster/taskcluster-lib-urls/.gitignore new file mode 100644 index 0000000..edf2e2c --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/.gitignore @@ -0,0 +1,146 @@ +/user-config.yml +lib/ +.test/ + +# Logs +logs +*.log +npm-debug.log* + +# Runtime data +pids +*.pid +*.seed + +# Directory for instrumented libs generated by jscoverage/JSCover +lib-cov + +# Coverage directory used by tools like istanbul +coverage + +# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) +.grunt + +# node-waf configuration +.lock-wscript + +# Compiled binary addons (http://nodejs.org/api/addons.html) +build/Release + +# Dependency directories +node_modules +jspm_packages + +# Optional npm cache directory +.npm + +# npm package lock file +package-lock.json + +# Optional REPL history +.node_repl_history + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/.gometalinter.json b/vendor/github.com/taskcluster/taskcluster-lib-urls/.gometalinter.json new file mode 100644 index 0000000..291500f --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/.gometalinter.json @@ -0,0 +1,25 @@ +{ + "Enable": [ + "deadcode", + "errcheck", + "goconst", + "goimports", + "golint", + "gosimple", + "gotype", + "gotypex", + "ineffassign", + "interfacer", + "maligned", + "megacheck", + "misspell", + "nakedret", + "safesql", + "staticcheck", + "structcheck", + "unconvert", + "unparam", + "unused", + "varcheck" + ] +} diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/.taskcluster.yml b/vendor/github.com/taskcluster/taskcluster-lib-urls/.taskcluster.yml new file mode 100644 index 0000000..bc4d6c8 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/.taskcluster.yml @@ -0,0 +1,173 @@ +version: 0 +metadata: + name: "taskcluster-lib-urls test" + description: "Library for building taskcluster urls" + owner: "{{ event.head.user.email }}" + source: "{{ event.head.repo.url }}" + +tasks: + - provisionerId: 
"{{ taskcluster.docker.provisionerId }}" + workerType: "{{ taskcluster.docker.workerType }}" + extra: + github: + env: true + events: + - pull_request.opened + - pull_request.synchronize + - pull_request.reopened + - push + payload: + maxRunTime: 3600 + image: "node:10" + command: + - "/bin/bash" + - "-lc" + - "git clone {{event.head.repo.url}} repo && cd repo && git checkout {{event.head.sha}} && yarn install && yarn test" + metadata: + name: "taskcluster-lib-urls node.js test" + description: "Library for building taskcluster urls" + owner: "{{ event.head.user.email }}" + source: "{{ event.head.repo.url }}" + - provisionerId: '{{ taskcluster.docker.provisionerId }}' + workerType: '{{ taskcluster.docker.workerType }}' + extra: + github: + events: + - push + branches: + - master + scopes: + - auth:aws-s3:read-write:taskcluster-raw-docs/taskcluster-lib-urls/ + payload: + maxRunTime: 3600 + image: taskcluster/upload-project-docs:latest + features: + taskclusterProxy: + true + command: + - /bin/bash + - '--login' + - '-cx' + - | + git clone {{event.head.repo.url}} repo + cd repo + git config advice.detachedHead false + git checkout {{event.head.sha}} + export DEBUG=* DOCS_PROJECT=taskcluster-lib-urls DOCS_TIER=libraries DOCS_FOLDER=docs DOCS_README=README.md + upload-project-docs + metadata: + name: "taskcluster-lib-urls docs upload" + description: "Upload documentation for this project" + owner: '{{ event.head.user.email }}' + source: '{{ event.head.repo.url }}' + - provisionerId: '{{ taskcluster.docker.provisionerId }}' + workerType: '{{ taskcluster.docker.workerType }}' + extra: + github: + events: + - pull_request.opened + - pull_request.synchronize + - pull_request.reopened + - push + payload: + maxRunTime: 3600 + image: 'golang:1.10' + command: + - /bin/bash + - '-c' + - | + mkdir -p /go/src/github.com/taskcluster/taskcluster-lib-urls + cd /go/src/github.com/taskcluster/taskcluster-lib-urls + git clone {{event.head.repo.url}} . 
+ git config advice.detachedHead false + git checkout {{event.head.sha}} + go get -v -d -t ./... + go test -v -race ./... + go get -u github.com/alecthomas/gometalinter + gometalinter --install + gometalinter + metadata: + name: "taskcluster-lib-urls go test" + description: Run library test suite - golang 1.10 + owner: '{{ event.head.user.email }}' + source: '{{ event.head.repo.url }}' + - provisionerId: '{{ taskcluster.docker.provisionerId }}' + workerType: '{{ taskcluster.docker.workerType }}' + extra: + github: + events: + - pull_request.opened + - pull_request.synchronize + - pull_request.reopened + - push + payload: + maxRunTime: 3600 + image: 'maven' + command: + - /bin/bash + - '-c' + - | + git clone {{event.head.repo.url}} repo + cd repo + git config advice.detachedHead false + git checkout {{event.head.sha}} + mvn -X -e install + metadata: + name: taskcluster-lib-urls java test + description: Run library test suite - java + owner: '{{ event.head.user.email }}' + source: '{{ event.head.repo.url }}' + - provisionerId: '{{ taskcluster.docker.provisionerId }}' + workerType: '{{ taskcluster.docker.workerType }}' + extra: + github: + events: + - pull_request.opened + - pull_request.synchronize + - pull_request.reopened + - push + payload: + maxRunTime: 3600 + image: 'python:2.7' + command: + - /bin/bash + - '-c' + - | + git clone {{event.head.repo.url}} repo + cd repo + git config advice.detachedHead false + git checkout {{event.head.sha}} + pip install tox + tox -e py27 + metadata: + name: "taskcluster-lib-urls python 2.7 test" + description: Run library test suite - python2.7 + owner: '{{ event.head.user.email }}' + source: '{{ event.head.repo.url }}' + - provisionerId: '{{ taskcluster.docker.provisionerId }}' + workerType: '{{ taskcluster.docker.workerType }}' + extra: + github: + events: + - pull_request.opened + - pull_request.synchronize + - pull_request.reopened + - push + payload: + maxRunTime: 3600 + image: 'python:3.6' + command: + - /bin/bash + - '-c' 
+ - | + git clone {{event.head.repo.url}} repo + cd repo + git config advice.detachedHead false + git checkout {{event.head.sha}} + pip install tox + tox -e py36 + metadata: + name: taskcluster-lib-urls python 3.6 test + description: Run library test suite - python3.6 + owner: '{{ event.head.user.email }}' + source: '{{ event.head.repo.url }}' diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/.travis.yml b/vendor/github.com/taskcluster/taskcluster-lib-urls/.travis.yml new file mode 100644 index 0000000..6fa9a8b --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/.travis.yml @@ -0,0 +1,30 @@ +language: node_js +sudo: false +node_js: +- '8' +- '10' +addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-4.8 +caches: + yarn: true +env: + global: + - DEBUG='taskcluster-lib-urls test' + - CXX=g++-4.8 +notifications: + irc: + secure: ub7PLmoLr7fPYdORBBgH1VSF6esn7+XlNXh8zm3NBOf2RAsMAp+Hk2r+yD4PI72+tGiPbUGsKKsCHh6+Xq3RZc9h67bfMS8KLoZGuogap12i0MnwM5x03lLCRhLvW+r1Y35u8NOEOuaQXfPL6UmQ2EGzDzrjVHXHotFnTqPFSFxeIAnAi9BSjqjy/yBstn1ybX0etobc+Mm9CRubD2eblKVwAYJi98yWLz+w8ou3BXUA/9iHZFyda60SJ8BTqxhnh9J1UYESvEoUg0dfO0Tj+++xQBz/mFM05SuVw5fzp4u5rECGkcFwanyeubHjzxvBXn83gV1mQpe9F1kAXsGNHt/1+YeIjo6nsOHdjpzScHfV5Nf063n1RNOFgQc3l5hhWmEFX+qdQFE1yxMXou00H/7iXlA8dUvjHCswqsKOFHiH3v6VQQ5cXtgu8jB1N0ZWPPeS1xIyQlHWXdD2uxKp+PyCnlKEjorIhRgFyr6kdPWRUQW45OJVvaMHs+a5DTZntsLf524kmObkJAmTdfy8Amq7XIKIYzHEo0KFQA4ukKfWwXqu7ESEgoXjI1rIERa9qzGBHR1prPZsBoDrctppL949/RsM48oT0PB7BGVQI9pwGI4oFrBwH/dp5w45AFpIB32r2IlLdsdP5iNiVtGDgVLn03KMvQgnVXP4cK1FEwg= +deploy: + provider: npm + email: taskcluster-accounts@mozilla.com + skip_cleanup: true + api_key: + secure: 
Af1jZIlFU7rxuJS12fb5N4yyz0VHsvgS7haSr5+a9yrMuxWpxLPdDSVlB33KOyhKoUYUY+ayJQqN2I+WYtGf7BYHGgPe6kNR5cCOseH1cRfmPrvNhQEWMrc7stWa6cHOpbI/3gZn6jSigKf6vLm4Wr8fOR+TLQ6tDaKzpqoZW8W6TwJyeM+9BQUY3aHuR6qb+ItpfrtR12vsOtLqDG7KImo0NhIb3ae91/HjC+EqL0UUJWYiqxg7Nkk2ziCWPYL1b554cKXf5HjOQUzWolHDEDybS20H8K20PSMUk3SZ79TKdqI3asXc8+V6AGCmgnq6cT4zLBk0gx6PoZWU6y3/OI1IYRSs7BGdrCiBGRlgAGSXnHoVWA6NDbQrkUrBVmhBKjnhyhO/q8la6fAPqHP2LgEQ6/RvT8XS9y1jpUwt233P+rjsXCW+nyF09cZAZ/CTCwGFuyMhtQZycA5SFE/IDZ3B+B5VMK7+HnC8j2QS/OKaNZXJivvIDpZzKXDz3TsHIlOY2WgfzvHXgCGnuRSNdx6ulgW9RvNOI5/AeWOvdFb3rnb8y8d3jllk1GUDcwycJ0Y702QuwBTzGnrCFl8AR8uEwUY29QSlPgXie2mdweDt1A2xM8iQ9IQeC1nlm71f9msSsa3PDzchJ6R5Shafuiz59UjkfDsekJ6sWvXTkbM= + on: + tags: true + repo: taskcluster/taskcluster-lib-urls + node: '10' diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/CODE_OF_CONDUCT.md b/vendor/github.com/taskcluster/taskcluster-lib-urls/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..2d33e6b --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +# Mozilla Community Participation Guidelines + +The most recent version of the Mozilla Community Participation Guideline can always be found here: https://www.mozilla.org/en-US/about/governance/policies/participation/ diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/CONTRIBUTING.md b/vendor/github.com/taskcluster/taskcluster-lib-urls/CONTRIBUTING.md new file mode 100644 index 0000000..3ab3a82 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/CONTRIBUTING.md @@ -0,0 +1,28 @@ +# How to Contribute + +We welcome pull requests from everyone. We do expect everyone to adhere to the [Mozilla Community Participation Guidelines][participation]. + +If you're trying to figure out what to work on, here are some places to find suitable projects: +* [Good first bugs][goodfirstbug]: these are scoped to make it easy for first-time contributors to get their feet wet with Taskcluster code. 
+* [Mentored bugs][bugsahoy]: these are slightly more involved projects that may require insight or guidance from someone on the Taskcluster team. +* [Full list of open issues][issues]: everything else + +If the project you're interested in working on isn't covered by a bug or issue, or you're unsure about how to proceed on an existing issue, it's a good idea to talk to someone on the Taskcluster team before you go too far down a particular path. You can find us in the #taskcluster channel on [Mozilla's IRC server][irc] to discuss. You can also simply add a comment to the issue or bug. + +Once you've found an issue to work on and written a patch, submit a pull request. Some things that will increase the chance that your pull request is accepted: + +* Follow our [best practices][bestpractices]. +* This includes [writing or updating tests][testing]. +* Write a [good commit message][commit]. + +Welcome to the team! + +[participation]: https://www.mozilla.org/en-US/about/governance/policies/participation/ +[issues]: ../../issues +[bugsahoy]: https://www.joshmatthews.net/bugsahoy/?taskcluster=1 +[goodfirstbug]: http://www.joshmatthews.net/bugsahoy/?taskcluster=1&simple=1 +[irc]: https://wiki.mozilla.org/IRC +[bestpractices]: https://docs.taskcluster.net/docs/manual/design/devel/best-practices +[testing]: https://docs.taskcluster.net/docs/manual/design/devel/best-practices/testing +[commit]: https://docs.taskcluster.net/docs/manual/design/devel/best-practices/commits + diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/LICENSE b/vendor/github.com/taskcluster/taskcluster-lib-urls/LICENSE new file mode 100644 index 0000000..a612ad9 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. 
"Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. 
Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. 
Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. 
* +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. 
+Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. 
If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/MANIFEST.in b/vendor/github.com/taskcluster/taskcluster-lib-urls/MANIFEST.in new file mode 100644 index 0000000..2451f52 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/MANIFEST.in @@ -0,0 +1,4 @@ +include LICENSE +global-exclude *.py[co] +include specification.yml +include package.json diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/README.md b/vendor/github.com/taskcluster/taskcluster-lib-urls/README.md new file mode 100644 index 0000000..10fb412 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/README.md @@ -0,0 +1,252 @@ +# Taskcluster URL Building Library + +[![License](https://img.shields.io/badge/license-MPL%202.0-orange.svg)](http://mozilla.org/MPL/2.0) + +A simple library to generate URLs for various Taskcluster resources across our various deployment methods. + +This serves as both a simple shim for projects that use JavaScript but also is the reference implementation for +how we define these paths. + +URLs are defined in the 'Taskcluster URL Format' document. + +Changelog +--------- +View the changelog on the [releases page](https://github.com/taskcluster/taskcluster-lib-urls/releases). + +Requirements +------------ + +This is tested on and should run on any of Node.js `{8, 10}`. 
+ +JS Usage +-------- +[![Node.js Build Status](https://travis-ci.org/taskcluster/taskcluster-lib-urls.svg?branch=master)](https://travis-ci.org/taskcluster/taskcluster-lib-urls) +[![npm](https://img.shields.io/npm/v/taskcluster-lib-urls.svg?maxAge=2592000)](https://www.npmjs.com/package/taskcluster-lib-urls) + +This package exports several methods for generating URLs conditionally based on +a root URL, as well as a few helper classes for generating URLs for a pre-determined +root URL: + +* `api(rootUrl, service, version, path)` -> `String` +* `apiReference(rootUrl, service, version)` -> `String` +* `docs(rootUrl, path)` -> `String` +* `exchangeReference(rootUrl, service, version)` -> `String` +* `schema(rootUrl, service, schema)` -> `String` +* `apiManifestSchema(rootUrl, version)` -> `String` +* `apiReferenceSchema(rootUrl, version)` -> `String` +* `exchangesReferenceSchema(rootUrl, version)` -> `String` +* `metadataMetaschema(rootUrl)` -> `String` +* `ui(rootUrl, path)` -> `String` +* `apiManifest(rootUrl)` -> `String` +* `testRootUrl()` -> `String` +* `withRootUrl(rootUrl)` -> `Class` instance for above methods + +When the `rootUrl` is `https://taskcluster.net`, the generated URLs will be to the Heroku cluster. Otherwise they will follow the +[spec defined in this project](https://github.com/taskcluster/taskcluster-lib-urls/tree/master/docs/urls-spec.md). + +`testRootUrl()` is used to share a common fake `rootUrl` between various Taskcluster mocks in testing. +The URL does not resolve. 
+ +```js +// Specifying root URL every time: +const libUrls = require('taskcluster-lib-urls'); + +libUrls.api(rootUrl, 'auth', 'v1', 'foo/bar'); +libUrls.schema(rootUrl, 'auth', 'v1/foo.yml'); // Note that schema names have versions in them +libUrls.apiReference(rootUrl, 'auth', 'v1'); +libUrls.exchangeReference(rootUrl, 'auth', 'v1'); +libUrls.ui(rootUrl, 'foo/bar'); +libUrls.apiManifest(rootUrl); +libUrls.docs(rootUrl, 'foo/bar'); +``` + +```js +// Specifying root URL in advance: +const libUrls = require('taskcluster-lib-urls'); + +const urls = libUrls.withRoot(rootUrl); + +urls.api('auth', 'v1', 'foo/bar'); +urls.schema('auth', 'v1/foo.yml'); +urls.apiReference('auth', 'v1'); +urls.exchangeReference('auth', 'v1'); +urls.ui('foo/bar'); +urls.apiManifest(); +urls.docs('foo/bar'); +``` + +If you would like, you can set this up via [taskcluster-lib-loader](https://github.com/taskcluster/taskcluster-lib-loader) as follows: + +```js +{ + libUrlss: { + require: ['cfg'], + setup: ({cfg}) => withRootUrl(cfg.rootURl), + }, +} +``` + +Test with: + +``` +yarn install +yarn test +``` + + +Go Usage +-------- + +[![GoDoc](https://godoc.org/github.com/taskcluster/taskcluster-lib-urls?status.svg)](https://godoc.org/github.com/taskcluster/taskcluster-lib-urls) + +The go package exports the following functions: + +```go +func API(rootURL string, service string, version string, path string) string +func APIReference(rootURL string, service string, version string) string +func Docs(rootURL string, path string) string +func ExchangeReference(rootURL string, service string, version string) string +func Schema(rootURL string, service string, name string) string +func APIManifestSchema(rootURL string, version string) string +func APIReferenceSchema(rootURL string, version string) string +func ExchangesReferenceSchema(rootURL string, version string) string +func MetadataMetaschema(rootURL string) string +func UI(rootURL string, path string) string +func APIManifest(rootURL string) string 
+``` + +Install with: + +``` +go install ./.. +``` + +Test with: + +``` +go test -v ./... +``` + +Python Usage +------------ + +You can install the python client with `pip install taskcluster-urls`; + +```python +import taskcluster_urls + +taskcluster_urls.api(root_url, 'auth', 'v1', 'foo/bar') +taskcluster_urls.schema(root_url, 'auth', 'v1/foo.yml') # Note that schema names have versions in them +taskcluster_urls.api_manifest_schema(root_url, 'v1') +taskcluster_urls.api_reference_schema(root_url, 'v1') +taskcluster_urls.exchanges_reference_schema(root_url, 'v1') +taskcluster_urls.metadata_metaschema(root_url, 'v1') +taskcluster_urls.api_reference(root_url, 'auth', 'v1') +taskcluster_urls.exchange_reference(root_url, 'auth', 'v1') +taskcluster_urls.ui(root_url, 'foo/bar') +taskcluster_urls.apiManifest(root_url) +taskcluster_urls.docs(root_url, 'foo/bar') + +And for testing, +```python +taskcluster_urls.test_root_url() +``` + +Test with: + +``` +tox +``` + +Java Usage +---------- + +[![JavaDoc](https://img.shields.io/badge/javadoc-reference-blue.svg)](http://taskcluster.github.io/taskcluster-lib-urls/apidocs) + +In order to use this library from your maven project, simply include it as a project dependency: + +``` + + ... + + ... + + org.mozilla.taskcluster + taskcluster-lib-urls + 1.0.0 + + + +``` + +The taskcluster-lib-urls artifacts are now available from the [maven central repository](http://central.sonatype.org/): + +* [Search Results](http://search.maven.org/#search|gav|1|g%3A%22org.mozilla.taskcluster%22%20AND%20a%3A%22taskcluster-lib-urls%22) +* [Directory Listing](https://repo1.maven.org/maven2/org/mozilla/taskcluster/taskcluster-lib-urls/) + +To use the library, do as follows: + +```java +import org.mozilla.taskcluster.urls.*; + +... 
+ + URLProvider urlProvider = URLs.provider("https://mytaskcluster.acme.org"); + + String fooBarAPI = urlProvider.api("auth", "v1", "foo/bar"); + String fooSchema = urlProvider.schema("auth", "v1/foo.yml"); // Note that schema names have versions in them + String apiSchema = urlProvider.apiReferenceSchema("v1"); + String exchangesSchema = urlProvider.exchangesReferenceSchema("v1"); + String manifestSchema = urlProvider.apiManifestSchema("v1"); + String metaschema = urlProvider.metadataMetaschema(); + String authAPIRef = urlProvider.apiReference("auth", "v1"); + String authExchangesRef = urlProvider.exchangeReference("auth", "v1"); + String uiFooBar = urlProvider.ui("foo/bar"); + String apiManifest = urlProvider.apiManifest(); + String docsFooBar = urlProvider.docs("foo/bar"); + +... +``` + +Install with: + +``` +mvn install +``` + +Test with: + +``` +mvn test +``` + + +Releasing +--------- + +New releases should be tested on Travis and Taskcluster to allow for all supported versions of various languages to be tested. Once satisfied that it works, new versions should be created with +`npm version` rather than by manually editing `package.json` and tags should be pushed to Github. + +Make the Node release first, as Python's version depends on its `package.json`. This follows the typical tag-and-push-to-publish approach: + +```sh +$ npm version minor # or patch, or major +$ git push upstream +``` + +Once that's done, build the Python sdists (only possible by the [maintainers on pypi](https://pypi.org/project/taskcluster-urls/#files)): + +```sh +rm -rf dist/* +python setup.py sdist bdist_wheel +python3 setup.py bdist_wheel +pip install twine +twine upload dist/* +``` + +Make sure to update [the changelog](https://github.com/taskcluster/taskcluster-lib-urls/releases)! 
+ +License +------- + +[Mozilla Public License Version 2.0](https://github.com/taskcluster/taskcluster-lib-urls/blob/master/LICENSE) diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/eclipse-formatter-config.xml b/vendor/github.com/taskcluster/taskcluster-lib-urls/eclipse-formatter-config.xml new file mode 100644 index 0000000..c519750 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/eclipse-formatter-config.xml @@ -0,0 +1,295 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/package.json b/vendor/github.com/taskcluster/taskcluster-lib-urls/package.json new file mode 100644 index 0000000..03c1bca --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/package.json @@ -0,0 +1,25 @@ +{ + "name": "taskcluster-lib-urls", + "version": "12.0.0", + "author": "Brian Stack ", + "description": "Build urls for taskcluster resources.", + "license": "MPL-2.0", + "scripts": { + "lint": "eslint src/*.js test/*.js", + "pretest": "yarn lint", + "test": "mocha test/*_test.js" + }, + "files": [ + "src" + ], + "repository": { + "type": "git", + "url": "https://github.com/taskcluster/taskcluster-lib-urls.git" + }, + "main": "./src/index.js", + "devDependencies": { + "eslint-config-taskcluster": "^3.1.0", + "js-yaml": "^3.11.0", + "mocha": "^5.1.1" + } +} diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/pom.xml 
b/vendor/github.com/taskcluster/taskcluster-lib-urls/pom.xml new file mode 100644 index 0000000..2225d1c --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/pom.xml @@ -0,0 +1,226 @@ + + + 4.0.0 + org.mozilla.taskcluster + taskcluster-lib-urls + 1.0.1 + jar + Taskcluster Lib URLs (java port) + A simple library to generate URLs for various Taskcluster resources across our various deployment methods. + https://github.com/taskcluster/taskcluster-lib-urls + 2018 + + UTF-8 + 1.7 + 1.7 + master + + + https://github.com/taskcluster/taskcluster-lib-urls/tree/${scm.branch} + scm:git:git://github.com/taskcluster/taskcluster-lib-urls.git + scm:git:ssh://git@github.com/taskcluster/taskcluster-lib-urls.git + HEAD + + + Bugzilla + https://bugzilla.mozilla.org/buglist.cgi?product=Taskcluster&component=Platform%20Libraries&resolution=--- + + + Taskcluster + https://tools.taskcluster.net + + + + Mozilla Public License, version 2.0 + https://www.mozilla.org/MPL/2.0 + + + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + + + + Pete Moore + pmoore@mozilla.com + https://github.com/petemoore + Mozilla + https://www.mozilla.org + + pmoore + + + + + + Taskcluster Tools + https://groups.google.com/forum/#!forum/mozilla.tools.taskcluster + + + + Mozilla + https://www.mozilla.org + + + + junit + junit + 4.12 + test + + + org.yaml + snakeyaml + 1.23 + + + org.projectlombok + lombok + 1.16.16 + + + + + + + net.revelc.code.formatter + formatter-maven-plugin + 2.7.1 + + + + format + + + + + ${project.basedir}/eclipse-formatter-config.xml + + org/mozilla/taskcluster/urls/*.java + + + + + + maven-javadoc-plugin + 2.9.1 + + ch.raffael.doclets.pegdown.PegdownDoclet + + ch.raffael.pegdown-doclet + pegdown-doclet + 1.1 + + true + + + + + org.eluder.coveralls + coveralls-maven-plugin + 3.1.0 + + + + org.codehaus.mojo + cobertura-maven-plugin + 2.6 + + xml + 256m + + true + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20.1 + + + + 
java.util.logging.config.file + src/test/resources/logging.properties + + + + + + + + . + + tests.yml + + + + + + + release + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.3 + true + + ossrh + https://oss.sonatype.org/ + true + + + + maven-javadoc-plugin + 2.9.1 + + + attach-javadocs + + jar + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 1.5 + + + sign-artifacts + verify + + sign + + + + + + org.apache.maven.plugins + maven-source-plugin + 2.2.1 + + + attach-sources + + jar-no-fork + + + + + + + + + + + projectlombok.org + http://projectlombok.org/mavenrepo + + + diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/setup.cfg b/vendor/github.com/taskcluster/taskcluster-lib-urls/setup.cfg new file mode 100644 index 0000000..972cab3 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/setup.cfg @@ -0,0 +1,2 @@ +[tools:pytest] +flake8-max-line-length = 120 diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/setup.py b/vendor/github.com/taskcluster/taskcluster-lib-urls/setup.py new file mode 100644 index 0000000..f601081 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/setup.py @@ -0,0 +1,28 @@ +import json +import os +from setuptools import setup + +package_json = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'package.json') +with open(package_json) as f: + version = json.load(f)['version'] + +setup( + name='taskcluster-urls', + description='Standardized url generator for taskcluster resources.', + long_description=open(os.path.join(os.path.dirname(__file__), 'README.md')).read(), + long_description_content_type='text/markdown', + url='https://github.com/taskcluster/taskcluster-lib-urls', + version=version, + packages=['taskcluster_urls'], + author='Brian Stack', + author_email='bstack@mozilla.com', + license='MPL2', + classifiers=[ + 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)', + 'Programming Language :: Python :: 2', + 'Programming Language :: 
Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + ], +) diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/tcurls.go b/vendor/github.com/taskcluster/taskcluster-lib-urls/tcurls.go new file mode 100644 index 0000000..53453f9 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/tcurls.go @@ -0,0 +1,108 @@ +package tcurls + +import ( + "fmt" + "strings" +) + +const oldRootURL = "https://taskcluster.net" + +// API generates a url for a resource in a taskcluster service +func API(rootURL string, service string, version string, path string) string { + path = strings.TrimLeft(path, "/") + switch r := strings.TrimRight(rootURL, "/"); r { + case oldRootURL: + return fmt.Sprintf("https://%s.taskcluster.net/%s/%s", service, version, path) + default: + return fmt.Sprintf("%s/api/%s/%s/%s", r, service, version, path) + } +} + +// APIReference enerates a url for a taskcluster service reference doc +func APIReference(rootURL string, service string, version string) string { + switch r := strings.TrimRight(rootURL, "/"); r { + case oldRootURL: + return fmt.Sprintf("https://references.taskcluster.net/%s/%s/api.json", service, version) + default: + return fmt.Sprintf("%s/references/%s/%s/api.json", r, service, version) + } +} + +// Docs generates a url for a taskcluster docs-site page +func Docs(rootURL string, path string) string { + path = strings.TrimLeft(path, "/") + switch r := strings.TrimRight(rootURL, "/"); r { + case oldRootURL: + return fmt.Sprintf("https://docs.taskcluster.net/%s", path) + default: + return fmt.Sprintf("%s/docs/%s", r, path) + } +} + +// ExchangeReference generates a url for a taskcluster exchange reference doc +func ExchangeReference(rootURL string, service string, version string) string { + switch r := strings.TrimRight(rootURL, "/"); r { + case oldRootURL: + return 
fmt.Sprintf("https://references.taskcluster.net/%s/%s/exchanges.json", service, version) + default: + return fmt.Sprintf("%s/references/%s/%s/exchanges.json", r, service, version) + } +} + +// Schema generates a url for a taskcluster schema +func Schema(rootURL string, service string, name string) string { + name = strings.TrimLeft(name, "/") + switch r := strings.TrimRight(rootURL, "/"); r { + case oldRootURL: + return fmt.Sprintf("https://schemas.taskcluster.net/%s/%s", service, name) + default: + return fmt.Sprintf("%s/schemas/%s/%s", r, service, name) + } +} + +// APIReferenceSchema generates a url for the api reference schema +func APIReferenceSchema(rootURL string, version string) string { + return Schema(rootURL, "common", fmt.Sprintf("api-reference-%s.json", version)) +} + +// ExchangesReferenceSchema generates a url for the exchanges reference schema +func ExchangesReferenceSchema(rootURL string, version string) string { + return Schema(rootURL, "common", fmt.Sprintf("exchanges-reference-%s.json", version)) +} + +// APIManifestSchema generates a url for the api manifest schema +func APIManifestSchema(rootURL string, version string) string { + return Schema(rootURL, "common", fmt.Sprintf("manifest-%s.json", version)) +} + +// MetadataMetaschema generates a url for the metadata metaschema +func MetadataMetaschema(rootURL string) string { + return Schema(rootURL, "common", "metadata-metaschema.json") +} + +// UI generates a url for a page in taskcluster tools site +// The purpose of the function is to switch on rootUrl: +// "The driver for having a ui method is so we can just call ui with a path and any root url, +// and the returned url should work for both our current deployment (with root URL = https://taskcluster.net) +// and any future deployment. The returned value is essentially rootURL == 'https://taskcluster.net' +// ? 
'https://tools.taskcluster.net/${path}' +// : '${rootURL}/${path}' " +func UI(rootURL string, path string) string { + path = strings.TrimLeft(path, "/") + switch r := strings.TrimRight(rootURL, "/"); r { + case oldRootURL: + return fmt.Sprintf("https://tools.taskcluster.net/%s", path) + default: + return fmt.Sprintf("%s/%s", r, path) + } +} + +// APIManifest returns a URL for the service manifest of a taskcluster deployment +func APIManifest(rootURL string) string { + switch r := strings.TrimRight(rootURL, "/"); r { + case oldRootURL: + return "https://references.taskcluster.net/manifest.json" + default: + return fmt.Sprintf("%s/references/manifest.json", r) + } +} diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/tests.yml b/vendor/github.com/taskcluster/taskcluster-lib-urls/tests.yml new file mode 100644 index 0000000..d05b53a --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/tests.yml @@ -0,0 +1,160 @@ +################################################################################ +# This document contains a language-agnostic set of test cases for validating +# language implementations. +# +# The format of this document is as follows: +# +# rootURLs: +# tests: +# function: [api, apiReference, docs, exchangeReference, schema, ui] +# argSets: +# expected: +# +# The specification for generating URLs that the libraries in this repository +# implement, is defined in `docs/urls-spec.md`. 
+################################################################################# +--- +rootURLs: + old: + - https://taskcluster.net + - https://taskcluster.net/ + - https://taskcluster.net// + new: + - https://taskcluster.example.com + - https://taskcluster.example.com/ + - https://taskcluster.example.com// + invalid: + - '12345' + empty: + - '' + +tests: +- function: api + argSets: + - [auth, v1, ping] + - [auth, v1, /ping] + - [auth, v1, //ping] + expected: + old: https://auth.taskcluster.net/v1/ping + new: https://taskcluster.example.com/api/auth/v1/ping + invalid: 12345/api/auth/v1/ping + empty: /api/auth/v1/ping +- function: api + argSets: + - [auth, v1, foo/ping] + - [auth, v1, /foo/ping] + - [auth, v1, //foo/ping] + expected: + old: https://auth.taskcluster.net/v1/foo/ping + new: https://taskcluster.example.com/api/auth/v1/foo/ping + invalid: 12345/api/auth/v1/foo/ping + empty: /api/auth/v1/foo/ping +- function: docs + argSets: + - [something/in/docs] + - [/something/in/docs] + - [//something/in/docs] + expected: + old: https://docs.taskcluster.net/something/in/docs + new: https://taskcluster.example.com/docs/something/in/docs + invalid: 12345/docs/something/in/docs + empty: /docs/something/in/docs +- function: schema + argSets: + - [auth, v1/something.json] + - [auth, /v1/something.json] + - [auth, //v1/something.json] + expected: + old: https://schemas.taskcluster.net/auth/v1/something.json + new: https://taskcluster.example.com/schemas/auth/v1/something.json + invalid: 12345/schemas/auth/v1/something.json + empty: /schemas/auth/v1/something.json +- function: schema + argSets: + - [auth, v2/something.json] + - [auth, /v2/something.json] + - [auth, //v2/something.json] + expected: + old: https://schemas.taskcluster.net/auth/v2/something.json + new: https://taskcluster.example.com/schemas/auth/v2/something.json + invalid: 12345/schemas/auth/v2/something.json + empty: /schemas/auth/v2/something.json +- function: apiReferenceSchema + argSets: + - [v1] + 
expected: + old: https://schemas.taskcluster.net/common/api-reference-v1.json + new: https://taskcluster.example.com/schemas/common/api-reference-v1.json + invalid: 12345/schemas/common/api-reference-v1.json + empty: /schemas/common/api-reference-v1.json +- function: exchangesReferenceSchema + argSets: + - [v1] + expected: + old: https://schemas.taskcluster.net/common/exchanges-reference-v1.json + new: https://taskcluster.example.com/schemas/common/exchanges-reference-v1.json + invalid: 12345/schemas/common/exchanges-reference-v1.json + empty: /schemas/common/exchanges-reference-v1.json +- function: apiManifestSchema + argSets: + - [v1] + expected: + old: https://schemas.taskcluster.net/common/manifest-v1.json + new: https://taskcluster.example.com/schemas/common/manifest-v1.json + invalid: 12345/schemas/common/manifest-v1.json + empty: /schemas/common/manifest-v1.json +- function: metadataMetaschema + argSets: + - [v1] + expected: + old: https://schemas.taskcluster.net/common/metadata-metaschema.json + new: https://taskcluster.example.com/schemas/common/metadata-metaschema.json + invalid: 12345/schemas/common/metadata-metaschema.json + empty: /schemas/common/metadata-metaschema.json +- function: apiReference + argSets: + - [auth, v1] + expected: + old: https://references.taskcluster.net/auth/v1/api.json + new: https://taskcluster.example.com/references/auth/v1/api.json + invalid: 12345/references/auth/v1/api.json + empty: /references/auth/v1/api.json +- function: exchangeReference + argSets: + - [auth, v1] + expected: + old: https://references.taskcluster.net/auth/v1/exchanges.json + new: https://taskcluster.example.com/references/auth/v1/exchanges.json + invalid: 12345/references/auth/v1/exchanges.json + empty: /references/auth/v1/exchanges.json +- function: ui + argSets: + - [something] + - [/something] + - [//something] + expected: + old: https://tools.taskcluster.net/something + new: https://taskcluster.example.com/something + invalid: 12345/something + empty: 
/something +- function: ui + argSets: + - [''] + - [/] + - [//] + expected: + old: https://tools.taskcluster.net/ + new: https://taskcluster.example.com/ + invalid: 12345/ + empty: / +- function: apiManifest + argSets: + - [] + expected: + old: https://references.taskcluster.net/manifest.json + new: https://taskcluster.example.com/references/manifest.json + invalid: 12345/references/manifest.json + empty: /references/manifest.json diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/tox.ini b/vendor/github.com/taskcluster/taskcluster-lib-urls/tox.ini new file mode 100644 index 0000000..dd2dc18 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/tox.ini @@ -0,0 +1,11 @@ +[tox] +envlist = py27,py36 + +[testenv] +deps = + pyyaml + pytest + pytest-cov + pytest-flake8 +commands = + pytest -v --flake8 --cov=taskcluster_urls {posargs:test} diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/yarn.lock b/vendor/github.com/taskcluster/taskcluster-lib-urls/yarn.lock new file mode 100644 index 0000000..f9b8d35 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/yarn.lock @@ -0,0 +1,1005 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
+# yarn lockfile v1 + + +acorn-jsx@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-3.0.1.tgz#afdf9488fb1ecefc8348f6fb22f464e32a58b36b" + integrity sha1-r9+UiPsezvyDSPb7IvRk4ypYs2s= + dependencies: + acorn "^3.0.4" + +acorn@^3.0.4: + version "3.3.0" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-3.3.0.tgz#45e37fb39e8da3f25baee3ff5369e2bb5f22017a" + integrity sha1-ReN/s56No/JbruP/U2niu18iAXo= + +acorn@^5.5.0: + version "5.5.3" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-5.5.3.tgz#f473dd47e0277a08e28e9bec5aeeb04751f0b8c9" + integrity sha512-jd5MkIUlbbmb07nXH0DT3y7rDVtkzDi4XZOUVWAer8ajmF/DTSSbl5oNFyDOl/OXA33Bl79+ypHhl2pN20VeOQ== + +ajv-keywords@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-2.1.1.tgz#617997fc5f60576894c435f940d819e135b80762" + integrity sha1-YXmX/F9gV2iUxDX5QNgZ4TW4B2I= + +ajv@^5.2.3, ajv@^5.3.0: + version "5.5.2" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-5.5.2.tgz#73b5eeca3fab653e3d3f9422b341ad42205dc965" + integrity sha1-c7Xuyj+rZT49P5Qis0GtQiBdyWU= + dependencies: + co "^4.6.0" + fast-deep-equal "^1.0.0" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.3.0" + +ansi-escapes@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-3.1.0.tgz#f73207bb81207d75fd6c83f125af26eea378ca30" + integrity sha512-UgAb8H9D41AQnu/PbWlCofQVcnV4Gs2bBJi9eZPxfU/hgglFh3SMDMENRIqdr7H6XFnXdoknctFByVsCOotTVw== + +ansi-regex@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" + integrity sha1-w7M6te42DYbg5ijwRorn7yfWVN8= + +ansi-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998" + integrity sha1-7QMXwyIGT3lGbAKWa922Bas32Zg= + +ansi-styles@^2.2.1: + version "2.2.1" + resolved 
"https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" + integrity sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4= + +ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== + dependencies: + color-convert "^1.9.0" + +argparse@^1.0.7: + version "1.0.10" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== + dependencies: + sprintf-js "~1.0.2" + +array-union@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39" + integrity sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk= + dependencies: + array-uniq "^1.0.1" + +array-uniq@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" + integrity sha1-r2rId6Jcx/dOBYiUdThY39sk/bY= + +arrify@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" + integrity sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0= + +babel-code-frame@^6.22.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.26.0.tgz#63fd43f7dc1e3bb7ce35947db8fe369a3f58c74b" + integrity sha1-Y/1D99weO7fONZR9uP42mj9Yx0s= + dependencies: + chalk "^1.1.3" + esutils "^2.0.2" + js-tokens "^3.0.2" + +balanced-match@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" + integrity sha1-ibTRmasr7kneFk6gK4nORi1xt2c= + +brace-expansion@^1.1.7: + version "1.1.11" + resolved 
"https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +browser-stdout@1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/browser-stdout/-/browser-stdout-1.3.1.tgz#baa559ee14ced73452229bad7326467c61fabd60" + integrity sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw== + +buffer-from@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.0.0.tgz#4cb8832d23612589b0406e9e2956c17f06fdf531" + integrity sha512-83apNb8KK0Se60UE1+4Ukbe3HbfELJ6UlI4ldtOGs7So4KD26orJM8hIY9lxdzP+UpItH1Yh/Y8GUvNFWFFRxA== + +caller-path@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/caller-path/-/caller-path-0.1.0.tgz#94085ef63581ecd3daa92444a8fe94e82577751f" + integrity sha1-lAhe9jWB7NPaqSREqP6U6CV3dR8= + dependencies: + callsites "^0.2.0" + +callsites@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/callsites/-/callsites-0.2.0.tgz#afab96262910a7f33c19a5775825c69f34e350ca" + integrity sha1-r6uWJikQp/M8GaV3WCXGnzTjUMo= + +chalk@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" + integrity sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg= + dependencies: + ansi-styles "^2.2.1" + escape-string-regexp "^1.0.2" + has-ansi "^2.0.0" + strip-ansi "^3.0.0" + supports-color "^2.0.0" + +chalk@^2.0.0, chalk@^2.1.0: + version "2.4.1" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.1.tgz#18c49ab16a037b6eb0152cc83e3471338215b66e" + integrity sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ== + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +chardet@^0.4.0: + version "0.4.2" 
+ resolved "https://registry.yarnpkg.com/chardet/-/chardet-0.4.2.tgz#b5473b33dc97c424e5d98dc87d55d4d8a29c8bf2" + integrity sha1-tUc7M9yXxCTl2Y3IfVXU2KKci/I= + +circular-json@^0.3.1: + version "0.3.3" + resolved "https://registry.yarnpkg.com/circular-json/-/circular-json-0.3.3.tgz#815c99ea84f6809529d2f45791bdf82711352d66" + integrity sha512-UZK3NBx2Mca+b5LsG7bY183pHWt5Y1xts4P3Pz7ENTwGVnJOUWbRb3ocjvX7hx9tq/yTAdclXm9sZ38gNuem4A== + +cli-cursor@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-2.1.0.tgz#b35dac376479facc3e94747d41d0d0f5238ffcb5" + integrity sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU= + dependencies: + restore-cursor "^2.0.0" + +cli-width@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-2.2.0.tgz#ff19ede8a9a5e579324147b0c11f0fbcbabed639" + integrity sha1-/xnt6Kml5XkyQUewwR8PvLq+1jk= + +co@^4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" + integrity sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ= + +color-convert@^1.9.0: + version "1.9.1" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.1.tgz#c1261107aeb2f294ebffec9ed9ecad529a6097ed" + integrity sha512-mjGanIiwQJskCC18rPR6OmrZ6fm2Lc7PeGFYwCmy5J34wC6F1PzdGL6xeMfmgicfYcNLGuVFA3WzXtIDCQSZxQ== + dependencies: + color-name "^1.1.1" + +color-name@^1.1.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + integrity sha1-p9BVi9icQveV3UIyj3QIMcpTvCU= + +commander@2.11.0: + version "2.11.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.11.0.tgz#157152fd1e7a6c8d98a5b715cf376df928004563" + integrity sha512-b0553uYA5YAEGgyYIGYROzKQ7X5RAqedkfjiZxwi0kL1g3bOaBNNZfYkzt/CL0umgD5wc9Jec2FbB98CjkMRvQ== + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + 
integrity sha1-2Klr13/Wjfd5OnMDajug1UBdR3s= + +concat-stream@^1.6.0: + version "1.6.2" + resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.2.tgz#904bdf194cd3122fc675c77fc4ac3d4ff0fd1a34" + integrity sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw== + dependencies: + buffer-from "^1.0.0" + inherits "^2.0.3" + readable-stream "^2.2.2" + typedarray "^0.0.6" + +core-util-is@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" + integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac= + +cross-spawn@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-5.1.0.tgz#e8bd0efee58fcff6f8f94510a0a554bbfa235449" + integrity sha1-6L0O/uWPz/b4+UUQoKVUu/ojVEk= + dependencies: + lru-cache "^4.0.1" + shebang-command "^1.2.0" + which "^1.2.9" + +debug@3.1.0, debug@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-3.1.0.tgz#5bb5a0672628b64149566ba16819e61518c67261" + integrity sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g== + dependencies: + ms "2.0.0" + +deep-is@~0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" + integrity sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ= + +del@^2.0.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/del/-/del-2.2.2.tgz#c12c981d067846c84bcaf862cff930d907ffd1a8" + integrity sha1-wSyYHQZ4RshLyvhiz/kw2Qf/0ag= + dependencies: + globby "^5.0.0" + is-path-cwd "^1.0.0" + is-path-in-cwd "^1.0.0" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + rimraf "^2.2.8" + +diff@3.5.0: + version "3.5.0" + resolved "https://registry.yarnpkg.com/diff/-/diff-3.5.0.tgz#800c0dd1e0a8bfbc95835c202ad220fe317e5a12" + integrity 
sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA== + +doctrine@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-2.1.0.tgz#5cd01fc101621b42c4cd7f5d1a66243716d3f39d" + integrity sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw== + dependencies: + esutils "^2.0.2" + +escape-string-regexp@1.0.5, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ= + +eslint-config-taskcluster@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/eslint-config-taskcluster/-/eslint-config-taskcluster-3.1.0.tgz#a8ec7efd59a88f33abd9c8256adb69ee60775007" + integrity sha512-xemOAkCVbkbMOmOZ9xHR134qzpJCjlWC2ymzNTVLdW02T7/vm0/38ADC1WQeHAWXNapLq8VrunXbk220ROrJqw== + dependencies: + eslint "^4.10.0" + +eslint-scope@^3.7.1: + version "3.7.1" + resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-3.7.1.tgz#3d63c3edfda02e06e01a452ad88caacc7cdcb6e8" + integrity sha1-PWPD7f2gLgbgGkUq2IyqzHzctug= + dependencies: + esrecurse "^4.1.0" + estraverse "^4.1.1" + +eslint-visitor-keys@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-1.0.0.tgz#3f3180fb2e291017716acb4c9d6d5b5c34a6a81d" + integrity sha512-qzm/XxIbxm/FHyH341ZrbnMUpe+5Bocte9xkmFMzPMjRaZMcXww+MpBptFvtU+79L362nqiLhekCxCxDPaUMBQ== + +eslint@^4.10.0: + version "4.19.1" + resolved "https://registry.yarnpkg.com/eslint/-/eslint-4.19.1.tgz#32d1d653e1d90408854bfb296f076ec7e186a300" + integrity sha512-bT3/1x1EbZB7phzYu7vCr1v3ONuzDtX8WjuM9c0iYxe+cq+pwcKEoQjl7zd3RpC6YOLgnSy3cTN58M2jcoPDIQ== + dependencies: + ajv "^5.3.0" + babel-code-frame "^6.22.0" + chalk "^2.1.0" + concat-stream "^1.6.0" + cross-spawn "^5.1.0" + debug "^3.1.0" + doctrine 
"^2.1.0" + eslint-scope "^3.7.1" + eslint-visitor-keys "^1.0.0" + espree "^3.5.4" + esquery "^1.0.0" + esutils "^2.0.2" + file-entry-cache "^2.0.0" + functional-red-black-tree "^1.0.1" + glob "^7.1.2" + globals "^11.0.1" + ignore "^3.3.3" + imurmurhash "^0.1.4" + inquirer "^3.0.6" + is-resolvable "^1.0.0" + js-yaml "^3.9.1" + json-stable-stringify-without-jsonify "^1.0.1" + levn "^0.3.0" + lodash "^4.17.4" + minimatch "^3.0.2" + mkdirp "^0.5.1" + natural-compare "^1.4.0" + optionator "^0.8.2" + path-is-inside "^1.0.2" + pluralize "^7.0.0" + progress "^2.0.0" + regexpp "^1.0.1" + require-uncached "^1.0.3" + semver "^5.3.0" + strip-ansi "^4.0.0" + strip-json-comments "~2.0.1" + table "4.0.2" + text-table "~0.2.0" + +espree@^3.5.4: + version "3.5.4" + resolved "https://registry.yarnpkg.com/espree/-/espree-3.5.4.tgz#b0f447187c8a8bed944b815a660bddf5deb5d1a7" + integrity sha512-yAcIQxtmMiB/jL32dzEp2enBeidsB7xWPLNiw3IIkpVds1P+h7qF9YwJq1yUNzp2OKXgAprs4F61ih66UsoD1A== + dependencies: + acorn "^5.5.0" + acorn-jsx "^3.0.0" + +esprima@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.0.tgz#4499eddcd1110e0b218bacf2fa7f7f59f55ca804" + integrity sha512-oftTcaMu/EGrEIu904mWteKIv8vMuOgGYo7EhVJJN00R/EED9DCua/xxHRdYnKtcECzVg7xOWhflvJMnqcFZjw== + +esquery@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.0.1.tgz#406c51658b1f5991a5f9b62b1dc25b00e3e5c708" + integrity sha512-SmiyZ5zIWH9VM+SRUReLS5Q8a7GxtRdxEBVZpm98rJM7Sb+A9DVCndXfkeFUd3byderg+EbDkfnevfCwynWaNA== + dependencies: + estraverse "^4.0.0" + +esrecurse@^4.1.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.2.1.tgz#007a3b9fdbc2b3bb87e4879ea19c92fdbd3942cf" + integrity sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ== + dependencies: + estraverse "^4.1.0" + +estraverse@^4.0.0, estraverse@^4.1.0, estraverse@^4.1.1: + version "4.2.0" + resolved 
"https://registry.yarnpkg.com/estraverse/-/estraverse-4.2.0.tgz#0dee3fed31fcd469618ce7342099fc1afa0bdb13" + integrity sha1-De4/7TH81GlhjOc0IJn8GvoL2xM= + +esutils@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b" + integrity sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs= + +external-editor@^2.0.4: + version "2.2.0" + resolved "https://registry.yarnpkg.com/external-editor/-/external-editor-2.2.0.tgz#045511cfd8d133f3846673d1047c154e214ad3d5" + integrity sha512-bSn6gvGxKt+b7+6TKEv1ZycHleA7aHhRHyAqJyp5pbUFuYYNIzpZnQDk7AsYckyWdEnTeAnay0aCy2aV6iTk9A== + dependencies: + chardet "^0.4.0" + iconv-lite "^0.4.17" + tmp "^0.0.33" + +fast-deep-equal@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz#c053477817c86b51daa853c81e059b733d023614" + integrity sha1-wFNHeBfIa1HaqFPIHgWbcz0CNhQ= + +fast-json-stable-stringify@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz#d5142c0caee6b1189f87d3a76111064f86c8bbf2" + integrity sha1-1RQsDK7msRifh9OnYREGT4bIu/I= + +fast-levenshtein@~2.0.4: + version "2.0.6" + resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" + integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc= + +figures@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/figures/-/figures-2.0.0.tgz#3ab1a2d2a62c8bfb431a0c94cb797a2fce27c962" + integrity sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI= + dependencies: + escape-string-regexp "^1.0.5" + +file-entry-cache@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-2.0.0.tgz#c392990c3e684783d838b8c84a45d8a048458361" + integrity sha1-w5KZDD5oR4PYOLjISkXYoEhFg2E= + dependencies: + flat-cache "^1.2.1" + object-assign "^4.0.1" + +flat-cache@^1.2.1: + version "1.3.0" + resolved 
"https://registry.yarnpkg.com/flat-cache/-/flat-cache-1.3.0.tgz#d3030b32b38154f4e3b7e9c709f490f7ef97c481" + integrity sha1-0wMLMrOBVPTjt+nHCfSQ9++XxIE= + dependencies: + circular-json "^0.3.1" + del "^2.0.2" + graceful-fs "^4.1.2" + write "^0.2.1" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + integrity sha1-FQStJSMVjKpA20onh8sBQRmU6k8= + +functional-red-black-tree@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz#1b0ab3bd553b2a0d6399d29c0e3ea0b252078327" + integrity sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc= + +glob@7.1.2, glob@^7.0.3, glob@^7.0.5, glob@^7.1.2: + version "7.1.2" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.2.tgz#c19c9df9a028702d678612384a6552404c636d15" + integrity sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +globals@^11.0.1: + version "11.5.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-11.5.0.tgz#6bc840de6771173b191f13d3a9c94d441ee92642" + integrity sha512-hYyf+kI8dm3nORsiiXUQigOU62hDLfJ9G01uyGMxhc6BKsircrUhC4uJPQPUSuq2GrTmiiEt7ewxlMdBewfmKQ== + +globby@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-5.0.0.tgz#ebd84667ca0dbb330b99bcfc68eac2bc54370e0d" + integrity sha1-69hGZ8oNuzMLmbz8aOrCvFQ3Dg0= + dependencies: + array-union "^1.0.1" + arrify "^1.0.0" + glob "^7.0.3" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +graceful-fs@^4.1.2: + version "4.1.11" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658" + integrity sha1-Dovf5NHduIVNZOBOp8AOKgJuVlg= + +growl@1.10.3: + version "1.10.3" + resolved 
"https://registry.yarnpkg.com/growl/-/growl-1.10.3.tgz#1926ba90cf3edfe2adb4927f5880bc22c66c790f" + integrity sha512-hKlsbA5Vu3xsh1Cg3J7jSmX/WaW6A5oBeqzM88oNbCRQFz+zUaXm6yxS4RVytp1scBoJzSYl4YAEOQIt6O8V1Q== + +has-ansi@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91" + integrity sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE= + dependencies: + ansi-regex "^2.0.0" + +has-flag@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-2.0.0.tgz#e8207af1cc7b30d446cc70b734b5e8be18f88d51" + integrity sha1-6CB68cx7MNRGzHC3NLXovhj4jVE= + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + integrity sha1-tdRU3CGZriJWmfNGfloH87lVuv0= + +he@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/he/-/he-1.1.1.tgz#93410fd21b009735151f8868c2f271f3427e23fd" + integrity sha1-k0EP0hsAlzUVH4howvJx80J+I/0= + +iconv-lite@^0.4.17: + version "0.4.21" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.21.tgz#c47f8733d02171189ebc4a400f3218d348094798" + integrity sha512-En5V9za5mBt2oUA03WGD3TwDv0MKAruqsuxstbMUZaj9W9k/m1CV/9py3l0L5kw9Bln8fdHQmzHSYtvpvTLpKw== + dependencies: + safer-buffer "^2.1.0" + +ignore@^3.3.3: + version "3.3.8" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-3.3.8.tgz#3f8e9c35d38708a3a7e0e9abb6c73e7ee7707b2b" + integrity sha512-pUh+xUQQhQzevjRHHFqqcTy0/dP/kS9I8HSrUydhihjuD09W6ldVWFtIrwhXdUJHis3i2rZNqEHpZH/cbinFbg== + +imurmurhash@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" + integrity sha1-khi5srkoojixPcT7a21XbyMUU+o= + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + integrity sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk= + dependencies: + once 
"^1.3.0" + wrappy "1" + +inherits@2, inherits@^2.0.3, inherits@~2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4= + +inquirer@^3.0.6: + version "3.3.0" + resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-3.3.0.tgz#9dd2f2ad765dcab1ff0443b491442a20ba227dc9" + integrity sha512-h+xtnyk4EwKvFWHrUYsWErEVR+igKtLdchu+o0Z1RL7VU/jVMFbYir2bp6bAj8efFNxWqHX0dIss6fJQ+/+qeQ== + dependencies: + ansi-escapes "^3.0.0" + chalk "^2.0.0" + cli-cursor "^2.1.0" + cli-width "^2.0.0" + external-editor "^2.0.4" + figures "^2.0.0" + lodash "^4.3.0" + mute-stream "0.0.7" + run-async "^2.2.0" + rx-lite "^4.0.8" + rx-lite-aggregates "^4.0.8" + string-width "^2.1.0" + strip-ansi "^4.0.0" + through "^2.3.6" + +is-fullwidth-code-point@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" + integrity sha1-o7MKXE8ZkYMWeqq5O+764937ZU8= + +is-path-cwd@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-path-cwd/-/is-path-cwd-1.0.0.tgz#d225ec23132e89edd38fda767472e62e65f1106d" + integrity sha1-0iXsIxMuie3Tj9p2dHLmLmXxEG0= + +is-path-in-cwd@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-path-in-cwd/-/is-path-in-cwd-1.0.1.tgz#5ac48b345ef675339bd6c7a48a912110b241cf52" + integrity sha512-FjV1RTW48E7CWM7eE/J2NJvAEEVektecDBVBE5Hh3nM1Jd0kvhHtX68Pr3xsDf857xt3Y4AkwVULK1Vku62aaQ== + dependencies: + is-path-inside "^1.0.0" + +is-path-inside@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-1.0.1.tgz#8ef5b7de50437a3fdca6b4e865ef7aa55cb48036" + integrity sha1-jvW33lBDej/cprToZe96pVy0gDY= + dependencies: + path-is-inside "^1.0.1" + +is-promise@^2.1.0: + version "2.1.0" + resolved 
"https://registry.yarnpkg.com/is-promise/-/is-promise-2.1.0.tgz#79a2a9ece7f096e80f36d2b2f3bc16c1ff4bf3fa" + integrity sha1-eaKp7OfwlugPNtKy87wWwf9L8/o= + +is-resolvable@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-resolvable/-/is-resolvable-1.1.0.tgz#fb18f87ce1feb925169c9a407c19318a3206ed88" + integrity sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg== + +isarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + integrity sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE= + +isexe@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + integrity sha1-6PvzdNxVb/iUehDcsFctYz8s+hA= + +js-tokens@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" + integrity sha1-mGbfOVECEw449/mWvOtlRDIJwls= + +js-yaml@^3.11.0, js-yaml@^3.9.1: + version "3.11.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.11.0.tgz#597c1a8bd57152f26d622ce4117851a51f5ebaef" + integrity sha512-saJstZWv7oNeOyBh3+Dx1qWzhW0+e6/8eDzo7p5rDFqxntSztloLtuKu+Ejhtq82jsilwOIZYsCz+lIjthg1Hw== + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +json-schema-traverse@^0.3.0: + version "0.3.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz#349a6d44c53a51de89b40805c5d5e59b417d3340" + integrity sha1-NJptRMU6Ud6JtAgFxdXlm0F9M0A= + +json-stable-stringify-without-jsonify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651" + integrity sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE= + +levn@^0.3.0, levn@~0.3.0: + version "0.3.0" + resolved 
"https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" + integrity sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4= + dependencies: + prelude-ls "~1.1.2" + type-check "~0.3.2" + +lodash@^4.17.4, lodash@^4.3.0: + version "4.17.10" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.10.tgz#1b7793cf7259ea38fb3661d4d38b3260af8ae4e7" + integrity sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg== + +lru-cache@^4.0.1: + version "4.1.2" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.1.2.tgz#45234b2e6e2f2b33da125624c4664929a0224c3f" + integrity sha512-wgeVXhrDwAWnIF/yZARsFnMBtdFXOg1b8RIrhilp+0iDYN4mdQcNZElDZ0e4B64BhaxeQ5zN7PMyvu7we1kPeQ== + dependencies: + pseudomap "^1.0.2" + yallist "^2.1.2" + +mimic-fn@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-1.2.0.tgz#820c86a39334640e99516928bd03fca88057d022" + integrity sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ== + +minimatch@3.0.4, minimatch@^3.0.2, minimatch@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== + dependencies: + brace-expansion "^1.1.7" + +minimist@0.0.8: + version "0.0.8" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" + integrity sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0= + +mkdirp@0.5.1, mkdirp@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" + integrity sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM= + dependencies: + minimist "0.0.8" + +mocha@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/mocha/-/mocha-5.1.1.tgz#b774c75609dac05eb48f4d9ba1d827b97fde8a7b" + integrity 
sha512-kKKs/H1KrMMQIEsWNxGmb4/BGsmj0dkeyotEvbrAuQ01FcWRLssUNXCEUZk6SZtyJBi6EE7SL0zDDtItw1rGhw== + dependencies: + browser-stdout "1.3.1" + commander "2.11.0" + debug "3.1.0" + diff "3.5.0" + escape-string-regexp "1.0.5" + glob "7.1.2" + growl "1.10.3" + he "1.1.1" + minimatch "3.0.4" + mkdirp "0.5.1" + supports-color "4.4.0" + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g= + +mute-stream@0.0.7: + version "0.0.7" + resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.7.tgz#3075ce93bc21b8fab43e1bc4da7e8115ed1e7bab" + integrity sha1-MHXOk7whuPq0PhvE2n6BFe0ee6s= + +natural-compare@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" + integrity sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc= + +object-assign@^4.0.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM= + +once@^1.3.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E= + dependencies: + wrappy "1" + +onetime@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/onetime/-/onetime-2.0.1.tgz#067428230fd67443b2794b22bba528b6867962d4" + integrity sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ= + dependencies: + mimic-fn "^1.0.0" + +optionator@^0.8.2: + version "0.8.2" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.2.tgz#364c5e409d3f4d6301d6c0b4c05bba50180aeb64" + integrity sha1-NkxeQJ0/TWMB1sC0wFu6UBgK62Q= + dependencies: + deep-is "~0.1.3" + fast-levenshtein "~2.0.4" + levn "~0.3.0" + prelude-ls "~1.1.2" + type-check "~0.3.2" + wordwrap "~1.0.0" + +os-tmpdir@~1.0.2: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ= + +path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= + +path-is-inside@^1.0.1, path-is-inside@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53" + integrity sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM= + +pify@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" + integrity sha1-7RQaasBDqEnqWISY59yosVMw6Qw= + +pinkie-promise@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" + integrity sha1-ITXW36ejWMBprJsXh3YogihFD/o= + dependencies: + pinkie "^2.0.0" + +pinkie@^2.0.0: + version "2.0.4" + resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" + integrity sha1-clVrgM+g1IqXToDnckjoDtT3+HA= + +pluralize@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pluralize/-/pluralize-7.0.0.tgz#298b89df8b93b0221dbf421ad2b1b1ea23fc6777" + integrity sha512-ARhBOdzS3e41FbkW/XWrTEtukqqLoK5+Z/4UeDaLuSW+39JPeFgs4gCGqsrJHVZX0fUrx//4OF0K1CUGwlIFow== + +prelude-ls@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" + integrity sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ= + +process-nextick-args@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.0.tgz#a37d732f4271b4ab1ad070d35508e8290788ffaa" + integrity sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw== + 
+progress@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/progress/-/progress-2.0.0.tgz#8a1be366bf8fc23db2bd23f10c6fe920b4389d1f" + integrity sha1-ihvjZr+Pwj2yvSPxDG/pILQ4nR8= + +pseudomap@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3" + integrity sha1-8FKijacOYYkX7wqKw0wa5aaChrM= + +readable-stream@^2.2.2: + version "2.3.6" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf" + integrity sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw== + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~2.0.0" + safe-buffer "~5.1.1" + string_decoder "~1.1.1" + util-deprecate "~1.0.1" + +regexpp@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-1.1.0.tgz#0e3516dd0b7904f413d2d4193dce4618c3a689ab" + integrity sha512-LOPw8FpgdQF9etWMaAfG/WRthIdXJGYp4mJ2Jgn/2lpkbod9jPn0t9UqN7AxBOKNfzRbYyVfgc7Vk4t/MpnXgw== + +require-uncached@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/require-uncached/-/require-uncached-1.0.3.tgz#4e0d56d6c9662fd31e43011c4b95aa49955421d3" + integrity sha1-Tg1W1slmL9MeQwEcS5WqSZVUIdM= + dependencies: + caller-path "^0.1.0" + resolve-from "^1.0.0" + +resolve-from@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-1.0.1.tgz#26cbfe935d1aeeeabb29bc3fe5aeb01e93d44226" + integrity sha1-Jsv+k10a7uq7Kbw/5a6wHpPUQiY= + +restore-cursor@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-2.0.0.tgz#9f7ee287f82fd326d4fd162923d62129eee0dfaf" + integrity sha1-n37ih/gv0ybU/RYpI9YhKe7g368= + dependencies: + onetime "^2.0.0" + signal-exit "^3.0.2" + +rimraf@^2.2.8: + version "2.6.2" + resolved 
"https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.2.tgz#2ed8150d24a16ea8651e6d6ef0f47c4158ce7a36" + integrity sha512-lreewLK/BlghmxtfH36YYVg1i8IAce4TI7oao75I1g245+6BctqTVQiBP3YUJ9C6DQOXJmkYR9X9fCLtCOJc5w== + dependencies: + glob "^7.0.5" + +run-async@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/run-async/-/run-async-2.3.0.tgz#0371ab4ae0bdd720d4166d7dfda64ff7a445a6c0" + integrity sha1-A3GrSuC91yDUFm19/aZP96RFpsA= + dependencies: + is-promise "^2.1.0" + +rx-lite-aggregates@^4.0.8: + version "4.0.8" + resolved "https://registry.yarnpkg.com/rx-lite-aggregates/-/rx-lite-aggregates-4.0.8.tgz#753b87a89a11c95467c4ac1626c4efc4e05c67be" + integrity sha1-dTuHqJoRyVRnxKwWJsTvxOBcZ74= + dependencies: + rx-lite "*" + +rx-lite@*, rx-lite@^4.0.8: + version "4.0.8" + resolved "https://registry.yarnpkg.com/rx-lite/-/rx-lite-4.0.8.tgz#0b1e11af8bc44836f04a6407e92da42467b79444" + integrity sha1-Cx4Rr4vESDbwSmQH6S2kJGe3lEQ= + +safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.2" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== + +safer-buffer@^2.1.0: + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + +semver@^5.3.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.5.0.tgz#dc4bbc7a6ca9d916dee5d43516f0092b58f7b8ab" + integrity sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA== + +shebang-command@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" + integrity sha1-RKrGW2lbAzmJaMOfNj/uXer98eo= + dependencies: + shebang-regex 
"^1.0.0" + +shebang-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3" + integrity sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM= + +signal-exit@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d" + integrity sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0= + +slice-ansi@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-1.0.0.tgz#044f1a49d8842ff307aad6b505ed178bd950134d" + integrity sha512-POqxBK6Lb3q6s047D/XsDVNPnF9Dl8JSaqe9h9lURl0OdNqy/ujDrOiIHtsqXMGbWWTIomRzAMaTyawAU//Reg== + dependencies: + is-fullwidth-code-point "^2.0.0" + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + integrity sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw= + +string-width@^2.1.0, string-width@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" + integrity sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw== + dependencies: + is-fullwidth-code-point "^2.0.0" + strip-ansi "^4.0.0" + +string_decoder@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" + integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== + dependencies: + safe-buffer "~5.1.0" + +strip-ansi@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" + integrity sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8= + dependencies: + ansi-regex "^2.0.0" + +strip-ansi@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" + integrity sha1-qEeQIusaw2iocTibY1JixQXuNo8= + dependencies: + ansi-regex "^3.0.0" + +strip-json-comments@~2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" + integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo= + +supports-color@4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-4.4.0.tgz#883f7ddabc165142b2a61427f3352ded195d1a3e" + integrity sha512-rKC3+DyXWgK0ZLKwmRsrkyHVZAjNkfzeehuFWdGGcqGDTZFH73+RH6S/RDAAxl9GusSjZSUWYLmT9N5pzXFOXQ== + dependencies: + has-flag "^2.0.0" + +supports-color@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" + integrity sha1-U10EXOa2Nj+kARcIRimZXp3zJMc= + +supports-color@^5.3.0: + version "5.4.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.4.0.tgz#1c6b337402c2137605efe19f10fec390f6faab54" + integrity sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w== + dependencies: + has-flag "^3.0.0" + +table@4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/table/-/table-4.0.2.tgz#a33447375391e766ad34d3486e6e2aedc84d2e36" + integrity sha512-UUkEAPdSGxtRpiV9ozJ5cMTtYiqz7Ni1OGqLXRCynrvzdtR1p+cfOWe2RJLwvUG8hNanaSRjecIqwOjqeatDsA== + dependencies: + ajv "^5.2.3" + ajv-keywords "^2.1.0" + chalk "^2.1.0" + lodash "^4.17.4" + slice-ansi "1.0.0" + string-width "^2.1.1" + +text-table@~0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" + integrity sha1-f17oI66AUgfACvLfSoTsP8+lcLQ= + +through@^2.3.6: + version "2.3.8" + resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" + 
integrity sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU= + +tmp@^0.0.33: + version "0.0.33" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" + integrity sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw== + dependencies: + os-tmpdir "~1.0.2" + +type-check@~0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" + integrity sha1-WITKtRLPHTVeP7eE8wgEsrUg23I= + dependencies: + prelude-ls "~1.1.2" + +typedarray@^0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" + integrity sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c= + +util-deprecate@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8= + +which@^1.2.9: + version "1.3.0" + resolved "https://registry.yarnpkg.com/which/-/which-1.3.0.tgz#ff04bdfc010ee547d780bec38e1ac1c2777d253a" + integrity sha512-xcJpopdamTuY5duC/KnTTNBraPK54YwpenP4lzxU8H91GudWpFv38u0CKjclE1Wi2EH2EDz5LRcHcKbCIzqGyg== + dependencies: + isexe "^2.0.0" + +wordwrap@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" + integrity sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus= + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8= + +write@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/write/-/write-0.2.1.tgz#5fc03828e264cea3fe91455476f7a3c566cb0757" + integrity sha1-X8A4KOJkzqP+kUVUdvejxWbLB1c= + dependencies: + mkdirp "^0.5.1" + +yallist@^2.1.2: + version "2.1.2" + resolved 
"https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52" + integrity sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI= diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/.gitignore b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/.gitignore new file mode 100644 index 0000000..e1bda13 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/.gitignore @@ -0,0 +1,27 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# Coverage report +coverage.report diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/LICENSE b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/LICENSE new file mode 100644 index 0000000..a612ad9 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/README.md b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/README.md new file mode 100644 index 0000000..b72cbd2 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/README.md @@ -0,0 +1,591 @@ +# Taskcluster Client Go + +[![GoDoc](https://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23?status.svg)](https://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23) +[![Coverage Status](https://coveralls.io/repos/taskcluster/taskcluster/clients/client-go/badge.svg?branch=master&service=github)](https://coveralls.io/github/taskcluster/taskcluster/clients/client-go?branch=master) +[![License](https://img.shields.io/badge/license-MPL%202.0-orange.svg)](http://mozilla.org/MPL/2.0) + +A go (golang) port of taskcluster-client. + +Complete godoc documentation [here](https://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23). 
+ +This library provides the following packages to interface with Taskcluster: + +### HTTP APIs +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcauth +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcawsprovisioner +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcec2manager +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcgithub +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tchooks +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcindex +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tclogin +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcnotify +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcpurgecache +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueue +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcsecrets + +### AMQP APIs +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcauthevents +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcawsprovisionerevents +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcgithubevents +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcpurgecacheevents +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents + +## Example programs + +To get you started quickly, some example programs are included that use both the HTTP APIs and the AMQP APIs: + +* This [HTTP example program](http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcauth#example-package--Scopes) demonstrates the use of the [tcauth](http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcauth) package to query the expiry and expanded scopes of a given 
clientId. +* This [HTTP example program](http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcauth#example-package--UpdateClient) demonstrates the use of the [tcauth](http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcauth) package to update an existing clientId with a new description and expiry. +* The [AMQP example program](http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents#example-package--TaskclusterSniffer) demonstrates the use of the [tcqueueevents](http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents) package to listen in on Taskcluster tasks being defined and executed. + +## Calling API End-Points + +To invoke an API end-point, instantiate one of the HTTP API classes (from +section [HTTP APIs](#http-apis)). In the following example we instantiate an +instance of the `Queue` client class and use it to create a task. + +```go +package main + +import ( + "encoding/json" + "fmt" + "log" + "time" + + "github.com/taskcluster/slugid-go/slugid" + tcclient "github.com/taskcluster/taskcluster/clients/client-go/v23" + "github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueue" +) + +// ********************************************************* +// These type definitions are copied from: +// https://github.com/taskcluster/generic-worker/blob/5cb2876624ce43974b1e1f96205535b037d63953/generated_windows.go#L11-L377 +// ********************************************************* +type ( + Artifact struct { + + // Explicitly set the value of the HTTP `Content-Type` response header when the artifact(s) + // is/are served over HTTP(S). If not provided (this property is optional) the worker will + // guess the content type of artifacts based on the filename extension of the file storing + // the artifact content. It does this by looking at the system filename-to-mimetype mappings + // defined in the Windows registry. 
Note, setting `contentType` on a directory artifact will + // apply the same contentType to all files contained in the directory. + // + // See [mime.TypeByExtension](https://godoc.org/mime#TypeByExtension). + // + // Since: generic-worker 10.4.0 + ContentType string `json:"contentType,omitempty"` + + // Date when artifact should expire must be in the future, no earlier than task deadline, but + // no later than task expiry. If not set, defaults to task expiry. + // + // Since: generic-worker 1.0.0 + Expires tcclient.Time `json:"expires,omitempty"` + + // Name of the artifact, as it will be published. If not set, `path` will be used. + // Conventionally (although not enforced) path elements are forward slash separated. Example: + // `public/build/a/house`. Note, no scopes are required to read artifacts beginning `public/`. + // Artifact names not beginning `public/` are scope-protected (caller requires scopes to + // download the artifact). See the Queue documentation for more information. + // + // Since: generic-worker 8.1.0 + Name string `json:"name,omitempty"` + + // Relative path of the file/directory from the task directory. Note this is not an absolute + // path as is typically used in docker-worker, since the absolute task directory name is not + // known when the task is submitted. Example: `dist\regedit.exe`. It doesn't matter if + // forward slashes or backslashes are used. + // + // Since: generic-worker 1.0.0 + Path string `json:"path"` + + // Artifacts can be either an individual `file` or a `directory` containing + // potentially multiple files with recursively included subdirectories. + // + // Since: generic-worker 1.0.0 + // + // Possible values: + // * "file" + // * "directory" + Type string `json:"type"` + } + + // Requires scope `queue:get-artifact:`. + // + // Since: generic-worker 5.4.0 + ArtifactContent struct { + + // Max length: 1024 + Artifact string `json:"artifact"` + + // The required SHA 256 of the content body. 
+ // + // Since: generic-worker 10.8.0 + // + // Syntax: ^[a-f0-9]{64}$ + Sha256 string `json:"sha256,omitempty"` + + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskID string `json:"taskId"` + } + + // Base64 encoded content of file/archive, up to 64KB (encoded) in size. + // + // Since: generic-worker 11.1.0 + Base64Content struct { + + // Base64 encoded content of file/archive, up to 64KB (encoded) in size. + // + // Since: generic-worker 11.1.0 + // + // Syntax: ^[A-Za-z0-9/+]+[=]{0,2}$ + // Max length: 65536 + Base64 string `json:"base64"` + } + + // By default tasks will be resolved with `state/reasonResolved`: `completed/completed` + // if all task commands have a zero exit code, or `failed/failed` if any command has a + // non-zero exit code. This payload property allows customsation of the task resolution + // based on exit code of task commands. + ExitCodeHandling struct { + + // Exit codes for any command in the task payload to cause this task to + // be resolved as `exception/intermittent-task`. Typically the Queue + // will then schedule a new run of the existing `taskId` (rerun) if not + // all task runs have been exhausted. + // + // See [itermittent tasks](https://docs.taskcluster.net/docs/reference/platform/taskcluster-queue/docs/worker-interaction#intermittent-tasks) for more detail. + // + // Since: generic-worker 10.10.0 + // + // Array items: + // Mininum: 1 + Retry []int64 `json:"retry,omitempty"` + } + + // Feature flags enable additional functionality. + // + // Since: generic-worker 5.3.0 + FeatureFlags struct { + + // An artifact named `public/chainOfTrust.json.asc` should be generated + // which will include information for downstream tasks to build a level + // of trust for the artifacts produced by the task and the environment + // it ran in. + // + // Since: generic-worker 5.3.0 + ChainOfTrust bool `json:"chainOfTrust,omitempty"` + + // Runs commands with UAC elevation. 
Only set to true when UAC is + // enabled on the worker and Administrative privileges are required by + // task commands. When UAC is disabled on the worker, task commands will + // already run with full user privileges, and therefore a value of true + // will result in a malformed-payload task exception. + // + // A value of true does not add the task user to the `Administrators` + // group - see the `osGroups` property for that. Typically + // `task.payload.osGroups` should include an Administrative group, such + // as `Administrators`, when setting to true. + // + // For security, `runAsAdministrator` feature cannot be used in + // conjunction with `chainOfTrust` feature. + // + // Requires scope + // `generic-worker:run-as-administrator:/`. + // + // Since: generic-worker 10.11.0 + RunAsAdministrator bool `json:"runAsAdministrator,omitempty"` + + // The taskcluster proxy provides an easy and safe way to make authenticated + // taskcluster requests within the scope(s) of a particular task. See + // [the github project](https://github.com/taskcluster/taskcluster-proxy) for more information. + // + // Since: generic-worker 10.6.0 + TaskclusterProxy bool `json:"taskclusterProxy,omitempty"` + } + + FileMount struct { + + // One of: + // * ArtifactContent + // * URLContent + // * RawContent + // * Base64Content + Content json.RawMessage `json:"content"` + + // The filesystem location to mount the file. + // + // Since: generic-worker 5.4.0 + File string `json:"file"` + } + + // This schema defines the structure of the `payload` property referred to in a + // Taskcluster Task definition. + GenericWorkerPayload struct { + + // Artifacts to be published. + // + // Since: generic-worker 1.0.0 + Artifacts []Artifact `json:"artifacts,omitempty"` + + // One entry per command (consider each entry to be interpreted as a full line of + // a Windows™ .bat file). 
For example: + // ``` + // [ + // "set", + // "echo hello world > hello_world.txt", + // "set GOPATH=C:\\Go" + // ] + // ``` + // + // Since: generic-worker 0.0.1 + // + // Array items: + Command []string `json:"command"` + + // Env vars must be string to __string__ mappings (not number or boolean). For example: + // ``` + // { + // "PATH": "C:\\Windows\\system32;C:\\Windows", + // "GOOS": "windows", + // "FOO_ENABLE": "true", + // "BAR_TOTAL": "3" + // } + // ``` + // + // Since: generic-worker 0.0.1 + // + // Map entries: + Env map[string]string `json:"env,omitempty"` + + // Feature flags enable additional functionality. + // + // Since: generic-worker 5.3.0 + Features FeatureFlags `json:"features,omitempty"` + + // Maximum time the task container can run in seconds. + // + // Since: generic-worker 0.0.1 + // + // Mininum: 1 + // Maximum: 86400 + MaxRunTime int64 `json:"maxRunTime"` + + // Directories and/or files to be mounted. + // + // Since: generic-worker 5.4.0 + // + // Array items: + // One of: + // * FileMount + // * WritableDirectoryCache + // * ReadOnlyDirectory + Mounts []json.RawMessage `json:"mounts,omitempty"` + + // By default tasks will be resolved with `state/reasonResolved`: `completed/completed` + // if all task commands have a zero exit code, or `failed/failed` if any command has a + // non-zero exit code. This payload property allows customsation of the task resolution + // based on exit code of task commands. + OnExitStatus ExitCodeHandling `json:"onExitStatus,omitempty"` + + // A list of OS Groups that the task user should be a member of. Requires scope + // `generic-worker:os-group://` for each + // group listed. + // + // Since: generic-worker 6.0.0 + // + // Array items: + OSGroups []string `json:"osGroups,omitempty"` + + // Specifies an artifact name for publishing RDP connection information. 
+ // + // Since this is potentially sensitive data, care should be taken to publish + // to a suitably locked down path, such as + // `login-identity//rdpinfo.json` which is only readable for + // the given login identity (for example + // `login-identity/mozilla-ldap/pmoore@mozilla.com/rdpinfo.json`). See the + // [artifact namespace guide](https://docs.taskcluster.net/manual/design/namespaces#artifacts) for more information. + // + // Use of this feature requires scope + // `generic-worker:allow-rdp:/` which must be + // declared as a task scope. + // + // The RDP connection data is published during task startup so that a user + // may interact with the running task. + // + // The task environment will be retained for 12 hours after the task + // completes, to enable an interactive user to perform investigative tasks. + // After these 12 hours, the worker will delete the task's Windows user + // account, and then continue with other tasks. + // + // No guarantees are given about the resolution status of the interactive + // task, since the task is inherently non-reproducible and no automation + // should rely on this value. + // + // Since: generic-worker 10.5.0 + RdpInfo string `json:"rdpInfo,omitempty"` + + // URL of a service that can indicate tasks superseding this one; the current `taskId` + // will be appended as a query argument `taskId`. The service should return an object with + // a `supersedes` key containing a list of `taskId`s, including the supplied `taskId`. The + // tasks should be ordered such that each task supersedes all tasks appearing later in the + // list. + // + // See [superseding](https://docs.taskcluster.net/reference/platform/taskcluster-queue/docs/superseding) for more detail. + // + // Since: generic-worker 10.2.2 + SupersederURL string `json:"supersederUrl,omitempty"` + } + + // Byte-for-byte literal inline content of file/archive, up to 64KB in size. 
+ // + // Since: generic-worker 11.1.0 + RawContent struct { + + // Byte-for-byte literal inline content of file/archive, up to 64KB in size. + // + // Since: generic-worker 11.1.0 + // + // Max length: 65536 + Raw string `json:"raw"` + } + + ReadOnlyDirectory struct { + + // One of: + // * ArtifactContent + // * URLContent + // * RawContent + // * Base64Content + Content json.RawMessage `json:"content"` + + // The filesystem location to mount the directory volume. + // + // Since: generic-worker 5.4.0 + Directory string `json:"directory"` + + // Archive format of content for read only directory. + // + // Since: generic-worker 5.4.0 + // + // Possible values: + // * "rar" + // * "tar.bz2" + // * "tar.gz" + // * "zip" + Format string `json:"format"` + } + + // URL to download content from. + // + // Since: generic-worker 5.4.0 + URLContent struct { + + // The required SHA 256 of the content body. + // + // Since: generic-worker 10.8.0 + // + // Syntax: ^[a-f0-9]{64}$ + Sha256 string `json:"sha256,omitempty"` + + // URL to download content from. + // + // Since: generic-worker 5.4.0 + URL string `json:"url"` + } + + WritableDirectoryCache struct { + + // Implies a read/write cache directory volume. A unique name for the + // cache volume. Requires scope `generic-worker:cache:`. + // Note if this cache is loaded from an artifact, you will also require + // scope `queue:get-artifact:` to use this cache. + // + // Since: generic-worker 5.4.0 + CacheName string `json:"cacheName"` + + // One of: + // * ArtifactContent + // * URLContent + // * RawContent + // * Base64Content + Content json.RawMessage `json:"content,omitempty"` + + // The filesystem location to mount the directory volume. + // + // Since: generic-worker 5.4.0 + Directory string `json:"directory"` + + // Archive format of the preloaded content (if `content` provided). 
+ // + // Since: generic-worker 5.4.0 + // + // Possible values: + // * "rar" + // * "tar.bz2" + // * "tar.gz" + // * "zip" + Format string `json:"format,omitempty"` + } +) + +func fatalOnError(err error) { + if err != nil { + log.Fatalf("Error:\n%v", err) + } +} + +func mustCompileToRawMessage(data interface{}) *json.RawMessage { + bytes, err := json.Marshal(data) + fatalOnError(err) + var JSON json.RawMessage + err = json.Unmarshal(bytes, &JSON) + fatalOnError(err) + return &JSON +} + +func main() { + myQueue := tcqueue.NewFromEnv() + taskID := slugid.Nice() + created := time.Now() + + env := map[string]string{} + + payload := GenericWorkerPayload{ + Artifacts: []Artifact{}, + Command: []string{ + `echo Hello World!`, + }, + Env: env, + Features: FeatureFlags{ + ChainOfTrust: false, + }, + MaxRunTime: 60, + Mounts: []json.RawMessage{}, + OSGroups: []string{}, + } + + payloadJSON := mustCompileToRawMessage(payload) + + taskDef := &tcqueue.TaskDefinitionRequest{ + Created: tcclient.Time(created), + Deadline: tcclient.Time(created.Add(time.Hour * 3)), + Dependencies: []string{}, + Expires: tcclient.Time(created.Add(time.Hour * 24)), + Extra: json.RawMessage("{}"), + Metadata: tcqueue.TaskMetadata{ + Description: "xxxx", + Name: "xxxx", + Owner: "pmoore@mozilla.com", + Source: "https://hg.mozilla.org/try/file/xxxx", + }, + Payload: *payloadJSON, + Priority: "normal", + ProvisionerID: "some-provisioner-id", + Requires: "all-completed", + Retries: 5, + Routes: []string{}, + SchedulerID: "-", + Scopes: []string{}, + Tags: map[string]string{}, + TaskGroupID: taskID, + WorkerType: "some-worker-type", + } + + tsr, err := myQueue.CreateTask(taskID, taskDef) + fatalOnError(err) + + respJSON, err := json.MarshalIndent(tsr, "", " ") + fatalOnError(err) + + fmt.Println(string(respJSON)) + fmt.Println("") + fmt.Printf("curl -L https://queue.taskcluster.net/v1/task/%v/runs/0/artifacts/public/logs/live.log | gunzip\n", taskID) +} +``` + +## Temporary credentials + +You can 
generate temporary credentials from permanent credentials using the +go client. This may be useful if you wish to issue credentials to a third +party. See https://docs.taskcluster.net/docs/manual/design/apis/hawk/temporary-credentials for +more information. Both named and unnamed temporary credentials are supported, +although named credentials are preferred if you are not sure which type to use. + +### Example + +```go +package main + +import ( + "fmt" + "log" + "os" + "strconv" + "time" + + tcclient "github.com/taskcluster/taskcluster/clients/client-go/v23" + "github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueue" +) + +const ( + taskID = "VESwp9JaRo-XkFN_bemBhw" + runID = 0 + rootURL = "https://tc.example.com" +) + +// This simple demo lists the artifacts in run 0 of task +// VESwp9JaRo-XkFN_bemBhw. It creates permanent credentials from environment +// variables TASKCLUSTER_CLIENT_ID and TASKCLUSTER_ACCESS_TOKEN, and then +// creates temporary credentials, valid for 24 hours, from these permanent +// credentials. It queries the Queue using the temporary credentials, and with +// limited authorized scopes. +// +// Note, the queueClient.ListArtifacts(...) call doesn't require any scopes. +// The generation of temporary credentials, and limiting via authorized scopes +// is purely illustrative. The TASKCLUSTER_CLIENT_ID must satisfy +// auth:create-client:demo-client/taskcluster/clients/client-go, though. 
+func main() { + permCreds := &tcclient.Credentials{ + ClientID: os.Getenv("TASKCLUSTER_CLIENT_ID"), + AccessToken: os.Getenv("TASKCLUSTER_ACCESS_TOKEN"), + } + tempCreds, err := permCreds.CreateNamedTemporaryCredentials( + "demo-client/taskcluster/clients/client-go", + time.Hour*24, + "assume:legacy-permacred", + ) + if err != nil { + log.Fatalf("Could not create temporary credentials: %v", err) + } + tempCreds.AuthorizedScopes = []string{ + "queue:get-artifact:private/build/*", + } + queueClient, err := tcqueue.New(tempCreds, rootURL) + if err != nil { + // bug in code + log.Fatalf("SERIOUS BUG! Could not create client from generated temporary credentials: %v", err) + } + listArtifactsResponse, err := queueClient.ListArtifacts(taskID, strconv.Itoa(runID), "", "") + if err != nil { + log.Fatalf("Could not call queue.listArtifacts endpoint: %v", err) + } + fmt.Printf("Task %v run %v artifacts:\n", taskID, runID) + for _, artifact := range listArtifactsResponse.Artifacts { + fmt.Printf(" * %v\n", artifact.Name) + } + fmt.Println("Done") +} +``` + +See the [HTTP API godocs](#http-apis) for more information, or browse the [integration +tests](https://github.com/taskcluster/taskcluster/tree/master/clients/client-go/integrationtest) +for further examples. + +## Generating +The libraries provided by this client are auto-generated based on the schema references in this repository. +This is done with the `yarn generate` command, run from the top level of the repository. + +The code which generates the library can all be found under the top level [codegenerator](https://github.com/taskcluster/taskcluster/tree/master/clients/client-go/codegenerator) +directory. 
diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/creds.go b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/creds.go new file mode 100644 index 0000000..d47a984 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/creds.go @@ -0,0 +1,233 @@ +package tcclient + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "strconv" + "strings" + "time" + + "github.com/taskcluster/jsonschema2go/text" + "github.com/taskcluster/slugid-go/slugid" +) + +// Credentials represents the set of credentials required to access protected +// Taskcluster HTTP APIs. +type Credentials struct { + // ClientID + ClientID string `json:"clientId"` + // AccessToken + AccessToken string `json:"accessToken"` + // Certificate used only for temporary credentials + Certificate string `json:"certificate"` + // AuthorizedScopes if set to nil, is ignored. Otherwise, it should be a + // subset of the scopes that the ClientId already has, and restricts the + // Credentials to only having these scopes. This is useful when performing + // actions on behalf of a client which has more restricted scopes. Setting + // to nil is not the same as setting to an empty array. If AuthorizedScopes + // is set to an empty array rather than nil, this is equivalent to having + // no scopes at all. + // See https://docs.taskcluster.net/docs/manual/design/apis/hawk/authorized-scopes + AuthorizedScopes []string `json:"authorizedScopes"` +} + +func (creds *Credentials) String() string { + return fmt.Sprintf( + "ClientId: %q\nAccessToken: %q\nCertificate: %q\nAuthorizedScopes: %q", + creds.ClientID, + text.StarOut(creds.AccessToken), + creds.Certificate, + creds.AuthorizedScopes, + ) +} + +// Client is the entry point into all the functionality in this package. It +// contains authentication credentials, and a service endpoint, which are +// required for all HTTP operations. 
+type Client struct { + Credentials *Credentials + // The Root URL of the Taskcluster deployment + RootURL string + // The (short) name of the service being accessed + ServiceName string + // The API version of the service being accessed + APIVersion string + // Whether authentication is enabled (e.g. set to 'false' when using taskcluster-proxy) + Authenticate bool + // HTTPClient is a ReducedHTTPClient to be used for the http call instead of + // the DefaultHTTPClient. + HTTPClient ReducedHTTPClient + // Context that aborts all requests with this client + Context context.Context +} + +// Certificate represents the certificate used in Temporary Credentials. See +// https://docs.taskcluster.net/docs/manual/design/apis/hawk/temporary-credentials +type Certificate struct { + Version int `json:"version"` + Scopes []string `json:"scopes"` + Start int64 `json:"start"` + Expiry int64 `json:"expiry"` + Seed string `json:"seed"` + Signature string `json:"signature"` + Issuer string `json:"issuer,omitempty"` +} + +// CreateNamedTemporaryCredentials generates temporary credentials from permanent +// credentials, valid for the given duration, starting immediately. The +// temporary credentials' scopes must be a subset of the permanent credentials' +// scopes. The duration may not be more than 31 days. Any authorized scopes of +// the permanent credentials will be passed through as authorized scopes to the +// temporary credentials, but will not be restricted via the certificate. +// +// Note that the auth service already applies a 5 minute clock skew to the +// start and expiry times in +// https://github.com/taskcluster/taskcluster-auth/pull/117 so no clock skew is +// applied in this method, nor should be applied by the caller. 
+// +// See https://docs.taskcluster.net/docs/manual/design/apis/hawk/temporary-credentials +func (permaCreds *Credentials) CreateNamedTemporaryCredentials(tempClientID string, duration time.Duration, scopes ...string) (tempCreds *Credentials, err error) { + if duration > 31*24*time.Hour { + return nil, errors.New("Temporary credentials must expire within 31 days; however a duration of " + duration.String() + " was specified to (*tcclient.Client).CreateTemporaryCredentials(...) method") + } + + now := time.Now() + start := now + expiry := now.Add(duration) + + if permaCreds.ClientID == "" { + return nil, errors.New("Temporary credentials cannot be created from credentials that have an empty ClientId") + } + if permaCreds.AccessToken == "" { + return nil, errors.New("Temporary credentials cannot be created from credentials that have an empty AccessToken") + } + if permaCreds.Certificate != "" { + return nil, errors.New("Temporary credentials cannot be created from temporary credentials, only from permanent credentials") + } + + cert := &Certificate{ + Version: 1, + Scopes: scopes, + Start: start.UnixNano() / 1e6, + Expiry: expiry.UnixNano() / 1e6, + Seed: slugid.V4() + slugid.V4(), + Signature: "", // gets set in Sign() method below + } + // include the issuer iff this is a named credential + if tempClientID != "" { + cert.Issuer = permaCreds.ClientID + } + + cert.Sign(permaCreds.AccessToken, tempClientID) + + certBytes, err := json.Marshal(cert) + if err != nil { + return + } + + tempAccessToken, err := generateTemporaryAccessToken(permaCreds.AccessToken, cert.Seed) + if err != nil { + return + } + + tempCreds = &Credentials{ + ClientID: permaCreds.ClientID, + AccessToken: tempAccessToken, + Certificate: string(certBytes), + AuthorizedScopes: permaCreds.AuthorizedScopes, + } + if tempClientID != "" { + tempCreds.ClientID = tempClientID + } + + return +} + +// CreateTemporaryCredentials is an alias for CreateNamedTemporaryCredentials +// with an empty name. 
+func (permaCreds *Credentials) CreateTemporaryCredentials(duration time.Duration, scopes ...string) (tempCreds *Credentials, err error) { + return permaCreds.CreateNamedTemporaryCredentials("", duration, scopes...) +} + +func (cert *Certificate) Sign(accessToken string, tempClientID string) (err error) { + lines := []string{"version:" + strconv.Itoa(cert.Version)} + // iff this is a named credential, include clientId and issuer + if cert.Issuer != "" { + lines = append(lines, + "clientId:"+tempClientID, + "issuer:"+cert.Issuer, + ) + } + lines = append(lines, + "seed:"+cert.Seed, + "start:"+strconv.FormatInt(cert.Start, 10), + "expiry:"+strconv.FormatInt(cert.Expiry, 10), + "scopes:", + ) + lines = append(lines, cert.Scopes...) + hash := hmac.New(sha256.New, []byte(accessToken)) + text := strings.Join(lines, "\n") + _, err = hash.Write([]byte(text)) + if err != nil { + return err + } + cert.Signature = base64.StdEncoding.EncodeToString(hash.Sum([]byte{})) + return +} + +func generateTemporaryAccessToken(permAccessToken, seed string) (tempAccessToken string, err error) { + hash := hmac.New(sha256.New, []byte(permAccessToken)) + _, err = hash.Write([]byte(seed)) + if err != nil { + return "", err + } + tempAccessToken = strings.TrimRight(base64.URLEncoding.EncodeToString(hash.Sum([]byte{})), "=") + return +} + +// Cert attempts to parse the certificate string to return it as an object. If +// the certificate is an empty string (e.g. in the case of permanent +// credentials) then a nil pointer is returned for the certificate. If a +// certificate has been specified but cannot be parsed, an error is returned, +// and cert is an empty certificate (rather than nil). 
+func (creds *Credentials) Cert() (cert *Certificate, err error) { + if creds.Certificate == "" { + return + } + cert = new(Certificate) + err = json.Unmarshal([]byte(creds.Certificate), cert) + return +} + +// CredentialsFromEnvVars creates and returns Taskcluster credentials +// initialised from the values of environment variables: +// +// TASKCLUSTER_CLIENT_ID +// TASKCLUSTER_ACCESS_TOKEN +// TASKCLUSTER_CERTIFICATE +// +// No validation is performed on the assigned values, and unset environment +// variables will result in empty string values. +func CredentialsFromEnvVars() *Credentials { + return &Credentials{ + ClientID: os.Getenv("TASKCLUSTER_CLIENT_ID"), + AccessToken: os.Getenv("TASKCLUSTER_ACCESS_TOKEN"), + Certificate: os.Getenv("TASKCLUSTER_CERTIFICATE"), + } +} + +// RootURLFromEnvVars returns the value of environment variable +// TASKCLUSTER_PROXY_URL if set to a non-empty string, otherwise the value of +// TASKCLUSTER_ROOT_URL if set, otherwise the empty string. +func RootURLFromEnvVars() string { + if proxyURL := os.Getenv("TASKCLUSTER_PROXY_URL"); proxyURL != "" { + return proxyURL + } + return os.Getenv("TASKCLUSTER_ROOT_URL") +} diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/go.mod b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/go.mod new file mode 100644 index 0000000..40b753c --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/go.mod @@ -0,0 +1,26 @@ +module github.com/taskcluster/taskcluster/clients/client-go/v23 + +go 1.12 + +require ( + github.com/fatih/camelcase v1.0.0 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 + github.com/taskcluster/httpbackoff/v3 v3.0.0 + github.com/taskcluster/jsonschema2go v1.0.0 + github.com/taskcluster/pulse-go v1.0.0 + github.com/taskcluster/slugid-go v1.1.0 + github.com/taskcluster/taskcluster-base-go v1.0.0 + github.com/taskcluster/taskcluster-lib-urls 
v12.0.0+incompatible + github.com/tent/hawk-go v0.0.0-20161026210932-d341ea318957 + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.1.0 // indirect + golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386 + gopkg.in/yaml.v2 v2.2.2 // indirect +) + +// https://bugzilla.mozilla.org/show_bug.cgi?id=1580513 +replace gopkg.in/yaml.v2 => github.com/go-yaml/yaml v0.0.0-20181115110504-51d6538a90f8 + +replace gopkg.in/check.v1 => github.com/go-check/check v0.0.0-20190902080502-41f04d3bba15 diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/go.sum b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/go.sum new file mode 100644 index 0000000..914730c --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/go.sum @@ -0,0 +1,73 @@ +github.com/cenkalti/backoff v0.0.0-20190506075156-2146c9339422 h1:+FKjzBIdfBHYDvxCv+djmDJdes/AoDtg8gpcxowBlF8= +github.com/cenkalti/backoff v0.0.0-20190506075156-2146c9339422/go.mod h1:b6Nc7NRH5C4aCISLry0tLnTjcuTEvoiqcWDdsU0sOGM= +github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= +github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= 
+github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-check/check v0.0.0-20161208181325-20d25e280405/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-check/check v0.0.0-20190902080502-41f04d3bba15/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-yaml/yaml v0.0.0-20181115110504-51d6538a90f8 h1:lMJ4rStmFyGCSg/zzEO1iTNQ8oq7YKFXIQocfGdsrRc= +github.com/go-yaml/yaml v0.0.0-20181115110504-51d6538a90f8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 h1:0ngsPmuP6XIjiFRNFYlvKwSr5zff2v+uPHaffZ6/M4k= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190819003559-eade30b20f1d h1:ciocj2R6f6p+WgiA3JlmM6/v9w3Ld/g6ENr9YgHBgxM= +github.com/streadway/amqp v0.0.0-20190819003559-eade30b20f1d/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827080102-edfb9018d271 h1:9ZltBYfOn7l3lpNgPb+vGbTc00IBlC+iJv7YII11rNo= +github.com/streadway/amqp v0.0.0-20190827080102-edfb9018d271/go.mod 
h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/taskcluster/httpbackoff v1.0.0 h1:bdh5txPv6geBVSEcx7Jy3kqiBaIrCZJwzCotPJKf9DU= +github.com/taskcluster/httpbackoff v1.0.0/go.mod h1:DEx05B3r52XQRbgzZ5y6XorMjVXBhtoHgc/ap+yLXgY= +github.com/taskcluster/httpbackoff/v3 v3.0.0 h1:Zh2BCW2iA3fzBBuZo2E4MvwyPSB6aimyI4EreeK3TRM= +github.com/taskcluster/httpbackoff/v3 v3.0.0/go.mod h1:99ubellEC0fmRj7wnGkPftT2xfCY7NmbjT3gzn2ZPUM= +github.com/taskcluster/jsonschema2go v1.0.0 h1:ZEDj2NKh8Sceq36zyLhSV6ann/aNXKZIe9cAXq7CDdk= +github.com/taskcluster/jsonschema2go v1.0.0/go.mod h1:jhsT3XPj3iLNRx0efJVfFzZBZgxeYE7IHfZAai8wuKQ= +github.com/taskcluster/pulse-go v1.0.0 h1:ys4ZUNp5TYiV5LSMxge4YF/AtuBUNH9StAA/bkno+r0= +github.com/taskcluster/pulse-go v1.0.0/go.mod h1:uuaqnRQj9XqouabCEKjnrlJiC6UT9Gurx2oSe6s+irM= +github.com/taskcluster/slugid-go v1.0.1 h1:qcuE5VYQ9a7IW5EUNQ63MoCC30LNJfzo56teJPIWKB8= +github.com/taskcluster/slugid-go v1.0.1/go.mod h1:a6qZo5JMShWvjDrupo8HLd+FVldMYW0Tc7YTrLT1ML4= +github.com/taskcluster/slugid-go v1.1.0 h1:SWsUplliyamdYzOKVM4+lDohZKuL63fKreGkvIKJ9aI= +github.com/taskcluster/slugid-go v1.1.0/go.mod h1:5sOAcPHjqso1UkKxSl77CkKgOwha0D9X0msBKBj0AOg= +github.com/taskcluster/taskcluster-base-go v1.0.0 h1:Jh2R/J7+a23LjtYEHQtkFV04QBxx6EVX8E0PrzUqJo4= +github.com/taskcluster/taskcluster-base-go v1.0.0/go.mod h1:ByyzyqqufsfZTrAHUw+0Grp8FwZAizZOKzVE1IpDXxQ= +github.com/taskcluster/taskcluster-lib-urls v12.0.0+incompatible h1:57WLzh7B04y6ahTOJ8wjvdkbwYqnyJkwLXQ1Tu4E/DU= +github.com/taskcluster/taskcluster-lib-urls v12.0.0+incompatible/go.mod h1:ALqTgi15AmJGEGubRKM0ydlLAFatlQPrQrmal9YZpQs= +github.com/tent/hawk-go v0.0.0-20161026210932-d341ea318957 h1:6Fre/uvwovW5YY4nfHZk66cAg9HjT9YdFSAJHUUgOyQ= +github.com/tent/hawk-go v0.0.0-20161026210932-d341ea318957/go.mod h1:dch7ywQEefE1ibFqBG1erFibrdUIwovcwQjksYuHuP4= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer 
v0.0.0-20190812004523-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190624190245-7f2218787638 h1:uIfBkD8gLczr4XDgYpt/qJYds2YJwZRNw4zs7wSnNhk= +golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190701013530-fb37f6ba8261 h1:sZzBoJ2n8UwkIuiimaNrJkotv5wGSBI+QzjunXUjLQE= +golang.org/x/tools v0.0.0-20190701013530-fb37f6ba8261/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190708122732-72ffa07ba3db h1:A+9p+kHB/5+SP59X7Zyu7j90kc60c6G//CUYAAayKEE= +golang.org/x/tools v0.0.0-20190708122732-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190715045107-607ca053a137 h1:fTnKBCUDrF9O76dW88IygY9ki9DkVkHaY2TXgzHcyes= +golang.org/x/tools v0.0.0-20190715045107-607ca053a137/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386 
h1:W/t3IYUOQPd8DK2ssOWA8sjulHHMxzTgiQkSx0z5sRQ= +golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/http.go b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/http.go new file mode 100644 index 0000000..3e7ff8e --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/http.go @@ -0,0 +1,337 @@ +package tcclient + +import ( + "bytes" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + + // "net/http/httputil" + "net/url" + "reflect" + "time" + + "github.com/taskcluster/httpbackoff/v3" + hawk "github.com/tent/hawk-go" + tcurls "github.com/taskcluster/taskcluster-lib-urls" +) + +var debug = false + +func init() { + if _, ok := os.LookupEnv("TASKCLUSTER_DEBUG"); ok { + debug = true + } +} + +// CallSummary provides information about the underlying http request and +// response issued for a given API call. +type CallSummary struct { + HTTPRequest *http.Request + // Keep a copy of request body in addition to the *http.Request, since + // accessing the Body via the *http.Request object, you get a io.ReadCloser + // - and after the request has been made, the body will have been read, and + // the data lost... This way, it is still available after the api call + // returns. + HTTPRequestBody string + // The Go Type which is marshaled into json and used as the http request + // body. 
+ HTTPRequestObject interface{} + HTTPResponse *http.Response + // Keep a copy of response body in addition to the *http.Response, since + // accessing the Body via the *http.Response object, you get a + // io.ReadCloser - and after the response has been read once (to unmarshal + // json into native go types) the data is lost... This way, it is still + // available after the api call returns. + HTTPResponseBody string + // Keep a record of how many http requests were attempted + Attempts int +} + +func (cs *CallSummary) String() string { + s := "\nCALL SUMMARY\n============\n" + if req := cs.HTTPRequest; req != nil { + s += fmt.Sprintf("Method: %v\n", req.Method) + if debug { + if req.URL != nil { + s += fmt.Sprintf("URL: %v\n", req.URL) + } + s += fmt.Sprintf("Request Headers:\n%#v\n", req.Header) + s += fmt.Sprintf("Request Body:\n%v\n", cs.HTTPRequestBody) + if resp := cs.HTTPResponse; resp != nil { + s += fmt.Sprintf("Response Headers:\n%#v\n", cs.HTTPResponse.Header) + } + } else { + if req.URL != nil { + s += fmt.Sprintf("Service: %s:%s\n", req.URL.Hostname(), req.URL.Port()) + } + } + } + s += fmt.Sprintf("Response Body:\n%v\n", cs.HTTPResponseBody) + s += fmt.Sprintf("Attempts: %v", cs.Attempts) + return s +} + +type APICall struct { + Client *Client + Route string + QueryString url.Values + Payload io.Reader +} + +// ReducedHTTPClient is the interface that wraps the functionality of +// http.Client that we actually use in Client.APICall. +type ReducedHTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + +// defaultHTTPClient is the HTTP Client used to make requests if none are +// defined in the client. +// A single object is created and used because http.Client is thread-safe when +// making multiple requests in various goroutines. 
+var defaultHTTPClient ReducedHTTPClient = &http.Client{} + +// utility function to create a URL object based on given data +func setURL(client *Client, route string, query url.Values) (u *url.URL, err error) { + URL := tcurls.API(client.RootURL, client.ServiceName, client.APIVersion, route) + u, err = url.Parse(URL) + if err != nil { + return nil, fmt.Errorf("Cannot parse url: '%v', is RootURL (%v) set correctly?\n%v\n", URL, client.RootURL, err) + } + if query != nil { + u.RawQuery = query.Encode() + } + return +} + +// Request is the underlying method that makes a raw API request, without +// performing any json marshaling/unmarshaling of requests/responses. It is +// useful if you wish to handle raw payloads and/or raw http response bodies, +// rather than calling APICall which translates []byte to/from go types. +func (client *Client) Request(rawPayload []byte, method, route string, query url.Values) (*CallSummary, error) { + callSummary := new(CallSummary) + callSummary.HTTPRequestBody = string(rawPayload) + + // function to perform http request - we call this using backoff library to + // have exponential backoff in case of intermittent failures (e.g. network + // blips or HTTP 5xx errors) + httpCall := func() (*http.Response, error, error) { + var ioReader io.Reader + ioReader = bytes.NewReader(rawPayload) + u, err := setURL(client, route, query) + if err != nil { + return nil, nil, fmt.Errorf("apiCall url cannot be parsed:\n%v\n", err) + } + callSummary.HTTPRequest, err = http.NewRequest(method, u.String(), ioReader) + if err != nil { + return nil, nil, fmt.Errorf("Internal error: apiCall url cannot be parsed although thought to be valid: '%v', is the RootURL (%v) set correctly?\n%v\n", u.String(), client.RootURL, err) + } + if len(rawPayload) > 0 { + callSummary.HTTPRequest.Header.Set("Content-Type", "application/json") + } + // Refresh Authorization header with each call... + // Only authenticate if client library user wishes to. 
+ if client.Authenticate { + err = client.Credentials.SignRequest(callSummary.HTTPRequest) + if err != nil { + return nil, nil, err + } + } + // Set context if one is given + if client.Context != nil { + callSummary.HTTPRequest = callSummary.HTTPRequest.WithContext(client.Context) + } + var resp *http.Response + if client.HTTPClient != nil { + resp, err = client.HTTPClient.Do(callSummary.HTTPRequest) + } else { + resp, err = defaultHTTPClient.Do(callSummary.HTTPRequest) + } + // return cancelled error, if context was cancelled + if client.Context != nil && client.Context.Err() != nil { + return nil, nil, client.Context.Err() + } + // b, e := httputil.DumpResponse(resp, true) + // if e == nil { + // fmt.Println(string(b)) + // } + return resp, err, nil + } + + // Make HTTP API calls using an exponential backoff algorithm... + var err error + callSummary.HTTPResponse, callSummary.Attempts, err = httpbackoff.Retry(httpCall) + + // read response into memory, so that we can return the body + if callSummary.HTTPResponse != nil { + body, err2 := ioutil.ReadAll(callSummary.HTTPResponse.Body) + if err2 == nil { + callSummary.HTTPResponseBody = string(body) + } + } + + return callSummary, err + +} + +// SignRequest will add an Authorization header +func (c *Credentials) SignRequest(req *http.Request) (err error) { + // s, err := c.SignHeader(req.Method, req.URL.String(), hash) + // req.Header.Set("Authorization", s) + // return err + + credentials := &hawk.Credentials{ + ID: c.ClientID, + Key: c.AccessToken, + Hash: sha256.New, + } + reqAuth := hawk.NewRequestAuth(req, credentials, 0) + reqAuth.Ext, err = getExtHeader(c) + if err != nil { + return fmt.Errorf("Internal error: was not able to generate hawk ext header from provided credentials:\n%s\n%s", c, err) + } + req.Header.Set("Authorization", reqAuth.RequestHeader()) + return nil +} + +type APICallException struct { + CallSummary *CallSummary + RootCause error +} + +func (err *APICallException) Error() string { + return 
err.CallSummary.String() + "\n" + err.RootCause.Error() +} + +// APICall is the generic REST API calling method which performs all REST API +// calls for this library. Each auto-generated REST API method simply is a +// wrapper around this method, calling it with specific specific arguments. +func (client *Client) APICall(payload interface{}, method, route string, result interface{}, query url.Values) (interface{}, *CallSummary, error) { + rawPayload := []byte{} + var err error + if reflect.ValueOf(payload).IsValid() && !reflect.ValueOf(payload).IsNil() { + rawPayload, err = json.Marshal(payload) + if err != nil { + cs := &CallSummary{ + HTTPRequestObject: payload, + } + return result, + cs, + &APICallException{ + CallSummary: cs, + RootCause: err, + } + } + } + callSummary, err := client.Request(rawPayload, method, route, query) + callSummary.HTTPRequestObject = payload + if err != nil { + // If context failed during this request, then we should just return that error + if client.Context != nil && client.Context.Err() != nil { + return result, callSummary, client.Context.Err() + } + return result, + callSummary, + &APICallException{ + CallSummary: callSummary, + RootCause: err, + } + } + // if result is passed in as nil, it means the API defines no response body + // json + if reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() { + err = json.Unmarshal([]byte(callSummary.HTTPResponseBody), &result) + } + + if err != nil { + return result, + callSummary, + &APICallException{ + CallSummary: callSummary, + RootCause: err, + } + } + return result, callSummary, nil +} + +// SignedURL creates a signed URL using the given Client, where route is the +// url path relative to the RootURL stored in the Client, query is the set of +// query string parameters, if any, and duration is the amount of time that the +// signed URL should remain valid for. 
+func (client *Client) SignedURL(route string, query url.Values, duration time.Duration) (u *url.URL, err error) { + u, err = setURL(client, route, query) + if err != nil { + return + } + credentials := &hawk.Credentials{ + ID: client.Credentials.ClientID, + Key: client.Credentials.AccessToken, + Hash: sha256.New, + } + reqAuth, err := hawk.NewURLAuth(u.String(), credentials, duration) + if err != nil { + return + } + reqAuth.Ext, err = getExtHeader(client.Credentials) + if err != nil { + return + } + bewitSignature := reqAuth.Bewit() + if query == nil { + query = url.Values{} + } + query.Set("bewit", bewitSignature) + u.RawQuery = query.Encode() + return +} + +// getExtHeader generates the hawk ext header based on the authorizedScopes and +// the certificate used in the case of temporary credentials. The header is a +// base64 encoded json object with a "certificate" property set to the +// certificate of the temporary credentials and a "authorizedScopes" property +// set to the array of authorizedScopes, if provided. If either "certificate" +// or "authorizedScopes" is not supplied, they will be omitted from the json +// result. If neither are provided, an empty string is returned, rather than a +// base64 encoded representation of "null" or "{}". Hawk interprets the empty +// string as meaning the ext header is not needed. 
+// +// See: +// * https://docs.taskcluster.net/docs/manual/design/apis/hawk/authorized-scopes +// * https://docs.taskcluster.net/docs/manual/design/apis/hawk/temporary-credentials +func getExtHeader(credentials *Credentials) (header string, err error) { + ext := &ExtHeader{} + if credentials.Certificate != "" { + certObj := new(Certificate) + err = json.Unmarshal([]byte(credentials.Certificate), certObj) + if err != nil { + return "", err + } + ext.Certificate = certObj + } + + if credentials.AuthorizedScopes != nil { + ext.AuthorizedScopes = &credentials.AuthorizedScopes + } + extJSON, err := json.Marshal(ext) + if err != nil { + return "", err + } + if string(extJSON) != "{}" { + return base64.StdEncoding.EncodeToString(extJSON), nil + } + return "", nil +} + +// ExtHeader represents the authentication/authorization data that is contained +// in the ext field inside the base64 decoded `Authorization` HTTP header in +// outgoing Hawk HTTP requests. +type ExtHeader struct { + Certificate *Certificate `json:"certificate,omitempty"` + // use pointer to slice to distinguish between nil slice and empty slice + AuthorizedScopes *[]string `json:"authorizedScopes,omitempty"` +} diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents/tcqueueevents.go b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents/tcqueueevents.go new file mode 100644 index 0000000..58e4d17 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents/tcqueueevents.go @@ -0,0 +1,374 @@ +// The following code is AUTO-GENERATED. Please DO NOT edit. 
+// To update this generated code, run the following command: +// in the /codegenerator/model subdirectory of this project, +// making sure that `${GOPATH}/bin` is in your `PATH`: +// +// go install && go generate +// +// This package was generated from the schema defined at +// /references/queue/v1/exchanges.json + +// The queue service is responsible for accepting tasks and track their state +// as they are executed by workers. In order ensure they are eventually +// resolved. +// +// This document describes AMQP exchanges offered by the queue, which allows +// third-party listeners to monitor tasks as they progress to resolution. +// These exchanges targets the following audience: +// * Schedulers, who takes action after tasks are completed, +// * Workers, who wants to listen for new or canceled tasks (optional), +// * Tools, that wants to update their view as task progress. +// +// You'll notice that all the exchanges in the document shares the same +// routing key pattern. This makes it very easy to bind to all messages +// about a certain kind tasks. +// +// **Task specific routes**, a task can define a task specific route using +// the `task.routes` property. See task creation documentation for details +// on permissions required to provide task specific routes. If a task has +// the entry `'notify.by-email'` in as task specific route defined in +// `task.routes` all messages about this task will be CC'ed with the +// routing-key `'route.notify.by-email'`. +// +// These routes will always be prefixed `route.`, so that cannot interfere +// with the _primary_ routing key as documented here. Notice that the +// _primary_ routing key is always prefixed `primary.`. This is ensured +// in the routing key reference, so API clients will do this automatically. 
+// +// Please, note that the way RabbitMQ works, the message will only arrive +// in your queue once, even though you may have bound to the exchange with +// multiple routing key patterns that matches more of the CC'ed routing +// routing keys. +// +// **Delivery guarantees**, most operations on the queue are idempotent, +// which means that if repeated with the same arguments then the requests +// will ensure completion of the operation and return the same response. +// This is useful if the server crashes or the TCP connection breaks, but +// when re-executing an idempotent operation, the queue will also resend +// any related AMQP messages. Hence, messages may be repeated. +// +// This shouldn't be much of a problem, as the best you can achieve using +// confirm messages with AMQP is at-least-once delivery semantics. Hence, +// this only prevents you from obtaining at-most-once delivery semantics. +// +// **Remark**, some message generated by timeouts maybe dropped if the +// server crashes at wrong time. Ideally, we'll address this in the +// future. For now we suggest you ignore this corner case, and notify us +// if this corner case is of concern to you. +// +// See: +// +// How to use this package +// +// This package is designed to sit on top of http://godoc.org/github.com/taskcluster/pulse-go/pulse. Please read +// the pulse package overview to get an understanding of how the pulse client is implemented in go. +// +// This package provides two things in addition to the basic pulse package: structured types for unmarshaling +// pulse message bodies into, and custom Binding interfaces, for defining the fixed strings for task cluster +// exchange names, and routing keys as structured types. 
+// +// For example, when specifying a binding, rather than using: +// +// pulse.Bind( +// "*.*.*.*.*.*.gaia.#", +// "exchange/taskcluster-queue/v1/task-defined", +// ) +// +// You can rather use: +// +// queueevents.TaskDefined{WorkerType: "gaia"} +// +// In addition, this means that you will also get objects in your callback method like *queueevents.TaskDefinedMessage +// rather than just interface{}. +package tcqueueevents + +import ( + "reflect" + "strings" +) + +// When a task is created or just defined a message is posted to this +// exchange. +// +// This message exchange is mainly useful when tasks are scheduled by a +// scheduler that uses `defineTask` as this does not make the task +// `pending`. Thus, no `taskPending` message is published. +// Please, note that messages are also published on this exchange if defined +// using `createTask`. +// +// See #taskDefined +type TaskDefined struct { + RoutingKeyKind string `mwords:"*"` + TaskID string `mwords:"*"` + RunID string `mwords:"*"` + WorkerGroup string `mwords:"*"` + WorkerID string `mwords:"*"` + ProvisionerID string `mwords:"*"` + WorkerType string `mwords:"*"` + SchedulerID string `mwords:"*"` + TaskGroupID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding TaskDefined) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding TaskDefined) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-defined" +} + +func (binding TaskDefined) NewPayloadObject() interface{} { + return new(TaskDefinedMessage) +} + +// When a task becomes `pending` a message is posted to this exchange. +// +// This is useful for workers who doesn't want to constantly poll the queue +// for new tasks. The queue will also be authority for task states and +// claims. But using this exchange workers should be able to distribute work +// efficiently and they would be able to reduce their polling interval +// significantly without affecting general responsiveness. 
+// +// See #taskPending +type TaskPending struct { + RoutingKeyKind string `mwords:"*"` + TaskID string `mwords:"*"` + RunID string `mwords:"*"` + WorkerGroup string `mwords:"*"` + WorkerID string `mwords:"*"` + ProvisionerID string `mwords:"*"` + WorkerType string `mwords:"*"` + SchedulerID string `mwords:"*"` + TaskGroupID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding TaskPending) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding TaskPending) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-pending" +} + +func (binding TaskPending) NewPayloadObject() interface{} { + return new(TaskPendingMessage) +} + +// Whenever a task is claimed by a worker, a run is started on the worker, +// and a message is posted on this exchange. +// +// See #taskRunning +type TaskRunning struct { + RoutingKeyKind string `mwords:"*"` + TaskID string `mwords:"*"` + RunID string `mwords:"*"` + WorkerGroup string `mwords:"*"` + WorkerID string `mwords:"*"` + ProvisionerID string `mwords:"*"` + WorkerType string `mwords:"*"` + SchedulerID string `mwords:"*"` + TaskGroupID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding TaskRunning) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding TaskRunning) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-running" +} + +func (binding TaskRunning) NewPayloadObject() interface{} { + return new(TaskRunningMessage) +} + +// Whenever the `createArtifact` end-point is called, the queue will create +// a record of the artifact and post a message on this exchange. All of this +// happens before the queue returns a signed URL for the caller to upload +// the actual artifact with (pending on `storageType`). +// +// This means that the actual artifact is rarely available when this message +// is posted. But it is not unreasonable to assume that the artifact will +// will become available at some point later. 
Most signatures will expire in +// 30 minutes or so, forcing the uploader to call `createArtifact` with +// the same payload again in-order to continue uploading the artifact. +// +// However, in most cases (especially for small artifacts) it's very +// reasonable assume the artifact will be available within a few minutes. +// This property means that this exchange is mostly useful for tools +// monitoring task evaluation. One could also use it count number of +// artifacts per task, or _index_ artifacts though in most cases it'll be +// smarter to index artifacts after the task in question have completed +// successfully. +// +// *NOTE*: this message is currently only sent for reference and error +// artifacts. This will be remedied in a future version of Taskcluster. +// +// See #artifactCreated +type ArtifactCreated struct { + RoutingKeyKind string `mwords:"*"` + TaskID string `mwords:"*"` + RunID string `mwords:"*"` + WorkerGroup string `mwords:"*"` + WorkerID string `mwords:"*"` + ProvisionerID string `mwords:"*"` + WorkerType string `mwords:"*"` + SchedulerID string `mwords:"*"` + TaskGroupID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding ArtifactCreated) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding ArtifactCreated) ExchangeName() string { + return "exchange/taskcluster-queue/v1/artifact-created" +} + +func (binding ArtifactCreated) NewPayloadObject() interface{} { + return new(ArtifactCreatedMessage) +} + +// When a task is successfully completed by a worker a message is posted +// this exchange. +// This message is routed using the `runId`, `workerGroup` and `workerId` +// that completed the task. But information about additional runs is also +// available from the task status structure. 
+// +// See #taskCompleted +type TaskCompleted struct { + RoutingKeyKind string `mwords:"*"` + TaskID string `mwords:"*"` + RunID string `mwords:"*"` + WorkerGroup string `mwords:"*"` + WorkerID string `mwords:"*"` + ProvisionerID string `mwords:"*"` + WorkerType string `mwords:"*"` + SchedulerID string `mwords:"*"` + TaskGroupID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding TaskCompleted) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding TaskCompleted) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-completed" +} + +func (binding TaskCompleted) NewPayloadObject() interface{} { + return new(TaskCompletedMessage) +} + +// When a task ran, but failed to complete successfully a message is posted +// to this exchange. This is same as worker ran task-specific code, but the +// task specific code exited non-zero. +// +// See #taskFailed +type TaskFailed struct { + RoutingKeyKind string `mwords:"*"` + TaskID string `mwords:"*"` + RunID string `mwords:"*"` + WorkerGroup string `mwords:"*"` + WorkerID string `mwords:"*"` + ProvisionerID string `mwords:"*"` + WorkerType string `mwords:"*"` + SchedulerID string `mwords:"*"` + TaskGroupID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding TaskFailed) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding TaskFailed) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-failed" +} + +func (binding TaskFailed) NewPayloadObject() interface{} { + return new(TaskFailedMessage) +} + +// Whenever Taskcluster fails to run a message is posted to this exchange. +// This happens if the task isn't completed before its `deadlìne`, +// all retries failed (i.e. workers stopped responding), the task was +// canceled by another entity, or the task carried a malformed payload. +// +// The specific _reason_ is evident from that task status structure, refer +// to the `reasonResolved` property for the last run. 
+// +// See #taskException +type TaskException struct { + RoutingKeyKind string `mwords:"*"` + TaskID string `mwords:"*"` + RunID string `mwords:"*"` + WorkerGroup string `mwords:"*"` + WorkerID string `mwords:"*"` + ProvisionerID string `mwords:"*"` + WorkerType string `mwords:"*"` + SchedulerID string `mwords:"*"` + TaskGroupID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding TaskException) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding TaskException) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-exception" +} + +func (binding TaskException) NewPayloadObject() interface{} { + return new(TaskExceptionMessage) +} + +// A message is published on task-group-resolved whenever all submitted +// tasks (whether scheduled or unscheduled) for a given task group have +// been resolved, regardless of whether they resolved as successful or +// not. A task group may be resolved multiple times, since new tasks may +// be submitted against an already resolved task group. 
+// +// See #taskGroupResolved +type TaskGroupResolved struct { + RoutingKeyKind string `mwords:"*"` + TaskGroupID string `mwords:"*"` + SchedulerID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding TaskGroupResolved) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding TaskGroupResolved) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-group-resolved" +} + +func (binding TaskGroupResolved) NewPayloadObject() interface{} { + return new(TaskGroupResolvedMessage) +} + +func generateRoutingKey(x interface{}) string { + val := reflect.ValueOf(x).Elem() + p := make([]string, 0, val.NumField()) + for i := 0; i < val.NumField(); i++ { + valueField := val.Field(i) + typeField := val.Type().Field(i) + tag := typeField.Tag + if t := tag.Get("mwords"); t != "" { + if v := valueField.Interface(); v == "" { + p = append(p, t) + } else { + p = append(p, v.(string)) + } + } + } + return strings.Join(p, ".") +} diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents/types.go b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents/types.go new file mode 100644 index 0000000..d496257 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents/types.go @@ -0,0 +1,533 @@ +// This source code file is AUTO-GENERATED by github.com/taskcluster/jsonschema2go + +package tcqueueevents + +import ( + tcclient "github.com/taskcluster/taskcluster/clients/client-go/v23" +) + +type ( + // Information about the artifact that was created + Artifact struct { + + // Mimetype for the artifact that was created. + // + // Max length: 255 + ContentType string `json:"contentType"` + + // Date and time after which the artifact created will be automatically + // deleted by the queue. + Expires tcclient.Time `json:"expires"` + + // Name of the artifact that was created, this is useful if you want to + // attempt to fetch the artifact. 
But keep in mind that just because an + // artifact is created doesn't mean that it's immediately available. + // + // Max length: 1024 + Name string `json:"name"` + + // This is the `storageType` for the request that was used to create the + // artifact. + // + // Possible values: + // * "reference" + // * "error" + StorageType string `json:"storageType"` + } + + // Message reporting a new artifact has been created for a given task. + ArtifactCreatedMessage struct { + + // Information about the artifact that was created + Artifact Artifact `json:"artifact"` + + // Id of the run on which artifact was created. + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version"` + + // Identifier for the worker-group within which the run with the created + // artifacted is running. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for the worker within which the run with the created artifact + // is running. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + } + + // JSON object with information about a run + RunInformation struct { + + // Reason for the creation of this run, + // **more reasons may be added in the future**. + // + // Possible values: + // * "scheduled" + // * "retry" + // * "task-retry" + // * "rerun" + // * "exception" + ReasonCreated string `json:"reasonCreated"` + + // Reason that run was resolved, this is mainly + // useful for runs resolved as `exception`. + // Note, **more reasons may be added in the future**, also this + // property is only available after the run is resolved. 
Some of these + // reasons, notably `intermittent-task`, `worker-shutdown`, and + // `claim-expired`, will trigger an automatic retry of the task. + // + // Possible values: + // * "completed" + // * "failed" + // * "deadline-exceeded" + // * "canceled" + // * "superseded" + // * "claim-expired" + // * "worker-shutdown" + // * "malformed-payload" + // * "resource-unavailable" + // * "internal-error" + // * "intermittent-task" + ReasonResolved string `json:"reasonResolved,omitempty"` + + // Date-time at which this run was resolved, ie. when the run changed + // state from `running` to either `completed`, `failed` or `exception`. + // This property is only present after the run as been resolved. + Resolved tcclient.Time `json:"resolved,omitempty"` + + // Id of this task run, `run-id`s always starts from `0` + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // Date-time at which this run was scheduled, ie. when the run was + // created in state `pending`. + Scheduled tcclient.Time `json:"scheduled"` + + // Date-time at which this run was claimed, ie. when the run changed + // state from `pending` to `running`. This property is only present + // after the run has been claimed. + Started tcclient.Time `json:"started,omitempty"` + + // State of this run + // + // Possible values: + // * "pending" + // * "running" + // * "completed" + // * "failed" + // * "exception" + State string `json:"state"` + + // Time at which the run expires and is resolved as `failed`, if the + // run isn't reclaimed. Note, only present after the run has been + // claimed. + TakenUntil tcclient.Time `json:"takenUntil,omitempty"` + + // Identifier for group that worker who executes this run is a part of, + // this identifier is mainly used for efficient routing. + // Note, this property is only present after the run is claimed. 
+ // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup,omitempty"` + + // Identifier for worker evaluating this run within given + // `workerGroup`. Note, this property is only available after the run + // has been claimed. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId,omitempty"` + } + + // Subset of a task definition + Task struct { + + // Arbitrary key-value tags (only strings limited to 4k). These can be used + // to attach informal metadata to a task. Use this for informal tags that + // tasks can be classified by. You can also think of strings here as + // candidates for formal metadata. Something like + // `purpose: 'build' || 'test'` is a good example. + // + // Default: {} + // + // Map entries: + // Max length: 4096 + Tags map[string]string `json:"tags"` + } + + // Message reporting that a task has complete successfully. + TaskCompletedMessage struct { + + // Id of the run that completed the task + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Subset of a task definition containing values that are useful for determining + // whether a message is interesting to the receiver. Where the full task + // definition is required, the receiver should call queue.task to download that + // definition. + Task Var `json:"task,omitempty"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version"` + + // Identifier for the worker-group within which this run ran. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for the worker that executed this run. 
+ // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + } + + // Message reporting that a task has been defined. The task may or may not be + // _scheduled_ too. + TaskDefinedMessage struct { + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Subset of a task definition containing values that are useful for determining + // whether a message is interesting to the receiver. Where the full task + // definition is required, the receiver should call queue.task to download that + // definition. + Task Var `json:"task,omitempty"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version"` + } + + // Message reporting that Taskcluster have failed to run a task. + TaskExceptionMessage struct { + + // Id of the last run for the task, not provided if `deadline` + // was exceeded before a run was started. + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId,omitempty"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Subset of a task definition containing values that are useful for determining + // whether a message is interesting to the receiver. Where the full task + // definition is required, the receiver should call queue.task to download that + // definition. + Task Var `json:"task,omitempty"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version"` + + // Identifier for the worker-group within which the last attempt of the task + // ran. Not provided, if `deadline` was exceeded before a run was started. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup,omitempty"` + + // Identifier for the last worker that failed to report, causing the task + // to fail. Not provided, if `deadline` was exceeded before a run + // was started. 
+ // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId,omitempty"` + } + + // Message reporting that a task failed to complete successfully. + TaskFailedMessage struct { + + // Id of the run that failed. + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Subset of a task definition containing values that are useful for determining + // whether a message is interesting to the receiver. Where the full task + // definition is required, the receiver should call queue.task to download that + // definition. + Task Var `json:"task,omitempty"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version"` + + // Identifier for the worker-group within which this run ran. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for the worker that executed this run. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + } + + // Message written once a task group has no tasks to be run. It is + // possible for a task group to later have another task added, in which + // case this message will be sent again once it finishes. + TaskGroupResolvedMessage struct { + + // Identifier for the scheduler that created this task-group. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + SchedulerID string `json:"schedulerId"` + + // Identifier for the task-group being listed. 
+ // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskGroupID string `json:"taskGroupId"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version,omitempty"` + } + + // Required task metadata + TaskMetadata struct { + + // Human readable description of the task, please **explain** what the + // task does. A few lines of documentation is not going to hurt you. + // + // Max length: 32768 + Description string `json:"description"` + + // Human readable name of task, used to very briefly given an idea about + // what the task does. + // + // Max length: 255 + Name string `json:"name"` + + // E-mail of person who caused this task, e.g. the person who did + // `hg push`. The person we should contact to ask why this task is here. + // + // Max length: 255 + Owner string `json:"owner"` + + // Link to source of this task, should specify a file, revision and + // repository. This should be place someone can go an do a git/hg blame + // to who came up with recipe for this task. 
+ // + // Syntax: ^https?:// + // Max length: 4096 + Source string `json:"source"` + } + + // Message reporting that a task is now pending + TaskPendingMessage struct { + + // Id of run that became pending, `run-id`s always starts from 0 + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Subset of a task definition + Task Task `json:"task,omitempty"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version"` + } + + // Message reporting that a given run of a task have started + TaskRunningMessage struct { + + // Id of the run that just started, always starts from 0 + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Time at which the run expires and is resolved as `failed`, if the run + // isn't reclaimed. + TakenUntil tcclient.Time `json:"takenUntil"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version"` + + // Identifier for the worker-group within which this run started. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for the worker executing this run. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + } + + // A representation of **task status** as known by the queue + TaskStatusStructure struct { + + // Deadline of the task, `pending` and `running` runs are + // resolved as **exception** if not resolved by other means + // before the deadline. Note, deadline cannot be more than + // 5 days into the future + Deadline tcclient.Time `json:"deadline"` + + // Task expiration, time at which task definition and + // status is deleted. 
Notice that all artifacts for the task + // must have an expiration that is no later than this. + Expires tcclient.Time `json:"expires"` + + // Unique identifier for a provisioner, that can supply specified + // `workerType` + // + // Syntax: ^[a-zA-Z0-9-_]{1,38}$ + ProvisionerID string `json:"provisionerId"` + + // Number of retries left for the task in case of infrastructure issues + // + // Mininum: 0 + // Maximum: 999 + RetriesLeft int64 `json:"retriesLeft"` + + // List of runs, ordered so that index `i` has `runId == i` + Runs []RunInformation `json:"runs"` + + // All tasks in a task group must have the same `schedulerId`. This is used for several purposes: + // + // * it can represent the entity that created the task; + // * it can limit addition of new tasks to a task group: the caller of + // `createTask` must have a scope related to the `schedulerId` of the task + // group; + // * it controls who can manipulate tasks, again by requiring + // `schedulerId`-related scopes; and + // * it appears in the routing key for Pulse messages about the task. + // + // Default: "-" + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + SchedulerID string `json:"schedulerId"` + + // State of this task. This is just an auxiliary property derived from state + // of latests run, or `unscheduled` if none. + // + // Possible values: + // * "unscheduled" + // * "pending" + // * "running" + // * "completed" + // * "failed" + // * "exception" + State string `json:"state"` + + // Identifier for a group of tasks scheduled together with this task. + // Generally, all tasks related to a single event such as a version-control + // push or a nightly build have the same `taskGroupId`. This property + // defaults to `taskId` if it isn't specified. Tasks with `taskId` equal to + // the `taskGroupId` are, [by convention](/docs/manual/using/task-graph), + // decision tasks. 
+ // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskGroupID string `json:"taskGroupId"` + + // Unique task identifier, this is UUID encoded as + // [URL-safe base64](http://tools.ietf.org/html/rfc4648#section-5) and + // stripped of `=` padding. + // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskID string `json:"taskId"` + + // Unique identifier for a worker-type within a specific provisioner + // + // Syntax: ^[a-z]([-a-z0-9]{0,36}[a-z0-9])?$ + WorkerType string `json:"workerType"` + } + + // Subset of a task definition containing values that are useful for determining + // whether a message is interesting to the receiver. Where the full task + // definition is required, the receiver should call queue.task to download that + // definition. + Var struct { + + // Arbitrary key-value tags (only strings limited to 4k). These can be used + // to attach informal metadata to a task. Use this for informal tags that + // tasks can be classified by. You can also think of strings here as + // candidates for formal metadata. Something like + // `purpose: 'build' || 'test'` is a good example. 
+ // + // Default: {} + // + // Map entries: + // Max length: 4096 + Tags map[string]string `json:"tags"` + } +) diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/testcases.json b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/testcases.json new file mode 100644 index 0000000..dd8b631 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/testcases.json @@ -0,0 +1,102 @@ +[ + { + "description": "Unnamed Temp Creds with no Authorized Scopes", + "expectedTempCreds": { + "accessToken": "R4OVHWpIvy6KsqS4AWE51QwbvgLvsstS6e6UW8IfHUY", + "authorizedScopes": null, + "certificate": "{\"version\":1,\"scopes\":[\"scope/asd:fhjdf/X\",\"scope/asd:fhjdf/XYZ*\"],\"start\":1438964811744,\"expiry\":1439051211744,\"seed\":\"JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ\",\"signature\":\"45AlB/hKZZZz4Tf3NaidutasfgBnr4t2AxwBiGDQF9Q=\"}", + "clientId": "def/ghi@XXX" + }, + "expiry": "2015-08-08T16:26:51.744Z", + "permCreds": { + "accessToken": "tokenABCDEFGH", + "authorizedScopes": null, + "certificate": "", + "clientId": "def/ghi@XXX" + }, + "seed": "JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ", + "start": "2015-08-07T16:26:51.744Z", + "tempCredsName": "", + "tempCredsScopes": [ + "scope/asd:fhjdf/X", + "scope/asd:fhjdf/XYZ*" + ] + }, + { + "description": "Named Temp Creds with no Authorized Scopes", + "expectedTempCreds": { + "accessToken": "R4OVHWpIvy6KsqS4AWE51QwbvgLvsstS6e6UW8IfHUY", + "authorizedScopes": null, + "certificate": "{\"version\":1,\"scopes\":[\"scope/asd:fhjdf/X\",\"scope/asd:fhjdf/XYZ*\"],\"start\":1438964811744,\"expiry\":1439051211744,\"seed\":\"JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ\",\"signature\":\"nNEaLtZMiw627NuDbF5Z8HDFc57MGWCptXBQSYNFgBk=\",\"issuer\":\"def/ghi@XXX\"}", + "clientId": "abc/def/ghi" + }, + "expiry": "2015-08-08T16:26:51.744Z", + "permCreds": { + "accessToken": "tokenABCDEFGH", + "authorizedScopes": null, + "certificate": "", + "clientId": "def/ghi@XXX" + }, + 
"seed": "JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ", + "start": "2015-08-07T16:26:51.744Z", + "tempCredsName": "abc/def/ghi", + "tempCredsScopes": [ + "scope/asd:fhjdf/X", + "scope/asd:fhjdf/XYZ*" + ] + }, + { + "description": "Unnamed Temp Creds with Authorized Scopes", + "expectedTempCreds": { + "accessToken": "R4OVHWpIvy6KsqS4AWE51QwbvgLvsstS6e6UW8IfHUY", + "authorizedScopes": [ + "scope/asd:fhjdf/X" + ], + "certificate": "{\"version\":1,\"scopes\":[\"scope/asd:fhjdf/X\",\"scope/asd:fhjdf/XYZ*\"],\"start\":1438964811744,\"expiry\":1439051211744,\"seed\":\"JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ\",\"signature\":\"45AlB/hKZZZz4Tf3NaidutasfgBnr4t2AxwBiGDQF9Q=\"}", + "clientId": "def/ghi@XXX" + }, + "expiry": "2015-08-08T16:26:51.744Z", + "permCreds": { + "accessToken": "tokenABCDEFGH", + "authorizedScopes": [ + "scope/asd:fhjdf/X" + ], + "certificate": "", + "clientId": "def/ghi@XXX" + }, + "seed": "JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ", + "start": "2015-08-07T16:26:51.744Z", + "tempCredsName": "", + "tempCredsScopes": [ + "scope/asd:fhjdf/X", + "scope/asd:fhjdf/XYZ*" + ] + }, + { + "description": "Named Temp Creds with Authorized Scopes", + "expectedTempCreds": { + "accessToken": "R4OVHWpIvy6KsqS4AWE51QwbvgLvsstS6e6UW8IfHUY", + "authorizedScopes": [ + "scope/asd:fhjdf/X" + ], + "certificate": "{\"version\":1,\"scopes\":[\"scope/asd:fhjdf/X\",\"scope/asd:fhjdf/XYZ*\"],\"start\":1438964811744,\"expiry\":1439051211744,\"seed\":\"JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ\",\"signature\":\"nNEaLtZMiw627NuDbF5Z8HDFc57MGWCptXBQSYNFgBk=\",\"issuer\":\"def/ghi@XXX\"}", + "clientId": "abc/def/ghi" + }, + "expiry": "2015-08-08T16:26:51.744Z", + "permCreds": { + "accessToken": "tokenABCDEFGH", + "authorizedScopes": [ + "scope/asd:fhjdf/X" + ], + "certificate": "", + "clientId": "def/ghi@XXX" + }, + "seed": "JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ", + "start": "2015-08-07T16:26:51.744Z", + "tempCredsName": "abc/def/ghi", + "tempCredsScopes": [ + 
"scope/asd:fhjdf/X", + "scope/asd:fhjdf/XYZ*" + ] + } +] diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/time.go b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/time.go new file mode 100644 index 0000000..fb59f21 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/time.go @@ -0,0 +1,42 @@ +package tcclient + +import ( + "errors" + "time" +) + +// Time wraps time.Time in order that json serialisation/deserialisation can be +// adapted. Marshaling time.Time types results in RFC3339 dates with nanosecond +// precision in the user's timezone. In order that the json date representation +// is consistent between what we send in json payloads, and what taskcluster +// services return, we wrap time.Time into type tcclient.Time which marshals +// instead to the same format used by the Taskcluster services; UTC based, with +// millisecond precision, using 'Z' timezone, e.g. 2015-10-27T20:36:19.255Z. +type Time time.Time + +// MarshalJSON implements the json.Marshaler interface. +// The time is a quoted string in RFC 3339 format, with sub-second precision added if present. +func (t Time) MarshalJSON() ([]byte, error) { + if y := time.Time(t).Year(); y < 0 || y >= 10000 { + // RFC 3339 is clear that years are 4 digits exactly. + // See golang.org/issue/4556#c15 for more discussion. + return nil, errors.New("queue.Time.MarshalJSON: year outside of range [0,9999]") + } + return []byte(`"` + t.String() + `"`), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// The time is expected to be a quoted string in RFC 3339 format. +func (t *Time) UnmarshalJSON(data []byte) (err error) { + // Fractional seconds are handled implicitly by Parse. + x := new(time.Time) + *x, err = time.Parse(`"`+time.RFC3339+`"`, string(data)) + *t = Time(*x) + return +} + +// Returns the Time in canonical RFC3339 representation, e.g. 
+// 2015-10-27T20:36:19.255Z +func (t Time) String() string { + return time.Time(t).UTC().Format("2006-01-02T15:04:05.000Z") +} diff --git a/vendor/github.com/tent/hawk-go/.gitignore b/vendor/github.com/tent/hawk-go/.gitignore new file mode 100644 index 0000000..9ed3b07 --- /dev/null +++ b/vendor/github.com/tent/hawk-go/.gitignore @@ -0,0 +1 @@ +*.test diff --git a/vendor/github.com/tent/hawk-go/.travis.yml b/vendor/github.com/tent/hawk-go/.travis.yml new file mode 100644 index 0000000..3438247 --- /dev/null +++ b/vendor/github.com/tent/hawk-go/.travis.yml @@ -0,0 +1,6 @@ +language: go +go: + - 1.3 + - tip +before_install: + - go get launchpad.net/gocheck diff --git a/vendor/github.com/tent/hawk-go/LICENSE b/vendor/github.com/tent/hawk-go/LICENSE new file mode 100644 index 0000000..88dcd4a --- /dev/null +++ b/vendor/github.com/tent/hawk-go/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Tent.is, LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Tent.is, LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/tent/hawk-go/README.md b/vendor/github.com/tent/hawk-go/README.md new file mode 100644 index 0000000..e3a333f --- /dev/null +++ b/vendor/github.com/tent/hawk-go/README.md @@ -0,0 +1,12 @@ +# hawk-go [![Build Status](https://travis-ci.org/tent/hawk-go.png?branch=master)](https://travis-ci.org/tent/hawk-go) + +hawk-go implements the [Hawk](https://github.com/hueniverse/hawk) HTTP +authentication scheme in Go. + +[**Documentation**](http://godoc.org/github.com/tent/hawk-go) + +## Installation + +```text +go get github.com/tent/hawk-go +``` diff --git a/vendor/github.com/tent/hawk-go/hawk.go b/vendor/github.com/tent/hawk-go/hawk.go new file mode 100644 index 0000000..a17d9c0 --- /dev/null +++ b/vendor/github.com/tent/hawk-go/hawk.go @@ -0,0 +1,691 @@ +// Package hawk implements the Hawk HTTP authentication scheme. +package hawk + +import ( + "bytes" + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "fmt" + "hash" + "io" + "net" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + "time" +) + +// Now is a func() time.Time that is used by the package to get the current time. +var Now = time.Now + +// MaxTimestampSkew is the maximum ±skew that a request timestamp can have without returning ErrTimestampSkew. 
+var MaxTimestampSkew = time.Minute + +var ( + ErrNoAuth = AuthError("no Authorization header or bewit parameter found") + ErrReplay = AuthError("request nonce is being replayed") + ErrInvalidMAC = AuthError("invalid MAC") + ErrBewitExpired = AuthError("bewit expired") + ErrTimestampSkew = AuthError("timestamp skew too high") + ErrMissingServerAuth = AuthError("missing Server-Authentication header") + ErrInvalidBewitMethod = AuthError("bewit only allows HEAD and GET requests") +) + +type AuthError string + +func (e AuthError) Error() string { return "hawk: " + string(e) } + +type CredentialErrorType int + +const ( + UnknownID CredentialErrorType = iota + UnknownApp + IDAppMismatch +) + +func (t CredentialErrorType) String() string { + switch t { + case UnknownApp: + return "unknown app" + case IDAppMismatch: + return "id/app mismatch" + } + return "unknown id" +} + +// CredentialError is returned by a CredentialsLookupFunc when the provided credentials +// ID is invalid. +type CredentialError struct { + Type CredentialErrorType + Credentials *Credentials +} + +func (e *CredentialError) Error() string { + return fmt.Sprintf("hawk: credential error with id %s and app %s: %s", e.Credentials.ID, e.Credentials.App, e.Type) +} + +type Credentials struct { + ID string + Key string + Hash func() hash.Hash + + // Data may be set in a CredentialsLookupFunc to correlate the credentials + // with an internal data record. + Data interface{} + + App string + Delegate string +} + +func (creds *Credentials) MAC() hash.Hash { return hmac.New(creds.Hash, []byte(creds.Key)) } + +type AuthType int + +const ( + AuthHeader AuthType = iota + AuthResponse + AuthBewit +) + +func (a AuthType) String() string { + switch a { + case AuthResponse: + return "response" + case AuthBewit: + return "bewit" + default: + return "header" + } +} + +// A CredentialsLookupFunc is called by NewAuthFromRequest after parsing the +// request auth. The Credentials will never be nil and ID will always be set. 
+// App and Delegate will be set if provided in the request. This function must +// look up the corresponding Key and Hash and set them on the provided +// Credentials. If the Key/Hash are found and the App/Delegate are valid (if +// provided) the error should be nil. If the Key or App could not be found or +// the App does not match the ID, then a CredentialError must be returned. +// Errors will propagate to the caller of NewAuthFromRequest, so internal errors +// may be returned. +type CredentialsLookupFunc func(*Credentials) error + +// A NonceCheckFunc is called by NewAuthFromRequest and should make sure that +// the provided nonce is unique within the context of the provided time.Time and +// Credentials. It should return false if the nonce is being replayed. +type NonceCheckFunc func(string, time.Time, *Credentials) bool + +type AuthFormatError struct { + Field string + Err string +} + +func (e AuthFormatError) Error() string { return "hawk: invalid " + e.Field + ", " + e.Err } + +// ParseRequestHeader parses a Hawk header (provided in the Authorization +// HTTP header) and populates an Auth. If an error is returned it will always be +// of type AuthFormatError. +func ParseRequestHeader(header string) (*Auth, error) { + auth := &Auth{ActualTimestamp: Now()} + err := auth.ParseHeader(header, AuthHeader) + if err != nil { + return nil, err + } + + if auth.Credentials.ID == "" { + return nil, AuthFormatError{"id", "missing or empty"} + } + if auth.Timestamp.IsZero() { + return nil, AuthFormatError{"ts", "missing, empty, or zero"} + } + if auth.Nonce == "" { + return nil, AuthFormatError{"nonce", "missing or empty"} + } + auth.ReqHash = true + + return auth, nil +} + +// ParseBewit parses a bewit token provided in a URL parameter and populates an +// Auth. If an error is returned it will always be of type AuthFormatError. 
+func ParseBewit(bewit string) (*Auth, error) { + if len(bewit)%4 != 0 { + bewit += strings.Repeat("=", 4-len(bewit)%4) + } + decoded, err := base64.URLEncoding.DecodeString(bewit) + if err != nil { + return nil, AuthFormatError{"bewit", "malformed base64 encoding"} + } + components := bytes.SplitN(decoded, []byte(`\`), 4) + if len(components) != 4 { + return nil, AuthFormatError{"bewit", "missing components"} + } + + auth := &Auth{ + Credentials: Credentials{ID: string(components[0])}, + Ext: string(components[3]), + Method: "GET", + ActualTimestamp: Now(), + IsBewit: true, + } + + ts, err := strconv.ParseInt(string(components[1]), 10, 64) + if err != nil { + return nil, AuthFormatError{"ts", "not an integer"} + } + auth.Timestamp = time.Unix(ts, 0) + + auth.MAC = make([]byte, base64.StdEncoding.DecodedLen(len(components[2]))) + n, err := base64.StdEncoding.Decode(auth.MAC, components[2]) + if err != nil { + return nil, AuthFormatError{"mac", "malformed base64 encoding"} + } + auth.MAC = auth.MAC[:n] + + return auth, nil +} + +// NewAuthFromRequest parses a request containing an Authorization header or +// bewit parameter and populates an Auth. If creds is not nil it will be called +// to look up the associated credentials. If nonce is not nil it will be called +// to make sure the nonce is not replayed. +// +// If the request does not contain a bewit or Authorization header, ErrNoAuth is +// returned. If the request contains a bewit and it is not a GET or HEAD +// request, ErrInvalidBewitMethod is returned. If there is an error parsing the +// provided auth details, an AuthFormatError will be returned. If creds returns +// an error, it will be returned. If nonce returns false, ErrReplay will be +// returned. 
+func NewAuthFromRequest(req *http.Request, creds CredentialsLookupFunc, nonce NonceCheckFunc) (*Auth, error) { + header := req.Header.Get("Authorization") + bewit := req.URL.Query().Get("bewit") + + var auth *Auth + var err error + if header != "" { + auth, err = ParseRequestHeader(header) + if err != nil { + return nil, err + } + } + if auth == nil && bewit != "" { + if req.Method != "GET" && req.Method != "HEAD" { + return nil, ErrInvalidBewitMethod + } + auth, err = ParseBewit(bewit) + if err != nil { + return nil, err + } + } + if auth == nil { + return nil, ErrNoAuth + } + + auth.Method = req.Method + auth.RequestURI = req.URL.Path + if req.URL.RawQuery != "" { + auth.RequestURI += "?" + req.URL.RawQuery + } + if bewit != "" { + auth.Method = "GET" + bewitPattern, _ := regexp.Compile(`\?bewit=` + bewit + `\z|bewit=` + bewit + `&|&bewit=` + bewit + `\z`) + auth.RequestURI = bewitPattern.ReplaceAllString(auth.RequestURI, "") + } + auth.Host, auth.Port = extractReqHostPort(req) + if creds != nil { + err = creds(&auth.Credentials) + if err != nil { + return nil, err + } + } + if nonce != nil && !auth.IsBewit && !nonce(auth.Nonce, auth.Timestamp, &auth.Credentials) { + return nil, ErrReplay + } + return auth, nil +} + +func extractReqHostPort(req *http.Request) (host string, port string) { + if idx := strings.Index(req.Host, ":"); idx != -1 { + host, port, _ = net.SplitHostPort(req.Host) + } else { + host = req.Host + } + if req.URL.Host != "" { + if idx := strings.Index(req.Host, ":"); idx != -1 { + host, port, _ = net.SplitHostPort(req.Host) + } else { + host = req.URL.Host + } + } + if port == "" { + if req.URL.Scheme == "http" { + port = "80" + } else { + port = "443" + } + } + return +} + +// NewRequestAuth builds a client Auth based on req and creds. tsOffset will be +// applied to Now when setting the timestamp. 
+func NewRequestAuth(req *http.Request, creds *Credentials, tsOffset time.Duration) *Auth { + auth := &Auth{ + Method: req.Method, + Credentials: *creds, + Timestamp: Now().Add(tsOffset), + Nonce: nonce(), + RequestURI: req.URL.RequestURI(), + } + auth.Host, auth.Port = extractReqHostPort(req) + return auth +} + +// NewRequestAuth builds a client Auth based on uri and creds. tsOffset will be +// applied to Now when setting the timestamp. +func NewURLAuth(uri string, creds *Credentials, tsOffset time.Duration) (*Auth, error) { + u, err := url.Parse(uri) + if err != nil { + return nil, err + } + auth := &Auth{ + Method: "GET", + Credentials: *creds, + Timestamp: Now().Add(tsOffset), + } + if u.Path != "" { + // url.Parse unescapes the path, which is unexpected + auth.RequestURI = "/" + strings.SplitN(uri[8:], "/", 2)[1] + } else { + auth.RequestURI = "/" + } + auth.Host, auth.Port = extractURLHostPort(u) + return auth, nil +} + +func extractURLHostPort(u *url.URL) (host string, port string) { + if idx := strings.Index(u.Host, ":"); idx != -1 { + host, port, _ = net.SplitHostPort(u.Host) + } else { + host = u.Host + } + if port == "" { + if u.Scheme == "http" { + port = "80" + } else { + port = "443" + } + } + return +} + +func nonce() string { + b := make([]byte, 8) + _, err := io.ReadFull(rand.Reader, b) + if err != nil { + panic(err) + } + return base64.StdEncoding.EncodeToString(b)[:8] +} + +const headerVersion = "1" + +type Auth struct { + Credentials Credentials + + Method string + RequestURI string + Host string + Port string + + MAC []byte + Nonce string + Ext string + Hash []byte + + // ReqHash is true if the request contained a hash + ReqHash bool + IsBewit bool + Timestamp time.Time + + // ActualTimestamp is when the request was received + ActualTimestamp time.Time +} + +// field is of form: key="value" +func lexField(r *strings.Reader) (string, string, error) { + key := make([]byte, 0, 5) + val := make([]byte, 0, 32) + + // read the key + for { + ch, _ := 
r.ReadByte() + if ch == '=' { + break + } + if ch < 'a' || ch > 'z' { // fail if not ASCII lowercase letter + return "", "", AuthFormatError{"header", "cannot parse header field"} + } + key = append(key, ch) + } + if ch, _ := r.ReadByte(); ch != '"' { + return "", "", AuthFormatError{string(key), "cannot parse value"} + } + // read the value + for { + ch, _ := r.ReadByte() + if ch == '"' { + break + } + // character class is ASCII printable [\x20-\x7E] without \ and " + if ch < 0x20 || ch > 0x7E || ch == '\\' { + return "", "", AuthFormatError{string(key), "cannot parse value"} + } + val = append(val, ch) + } + + return string(key), string(val), nil +} + +func lexHeader(header string) (map[string]string, error) { + params := make(map[string]string, 8) + + r := strings.NewReader(header) + + for { + ch, eof := r.ReadByte() + if eof != nil { + break + } + + switch { + case ch == ' ' || ch == '\t' || ch == ',': //ignore spaces and commas + case ch >= 'a' && ch <= 'z': //beginning of key/value pair like 'id="abcdefg"' + r.UnreadByte() + key, val, err := lexField(r) + if err != nil { + return params, err + } + params[key] = val + default: //invalid character encountered + return params, AuthFormatError{"header", "cannot parse header"} + } + } + return params, nil +} + +// ParseHeader parses a Hawk request or response header and populates auth. +// t must be AuthHeader if the header is an Authorization header from a request +// or AuthResponse if the header is a Server-Authorization header from +// a response. 
+// a bewit and the timestamp is after the specified expiry, ErrBewitExpired
+// will be returned. If auth is from a request header and the timestamp is
+// outside the maximum skew, ErrTimestampSkew will be returned. If the MAC is
+// not the expected value, ErrInvalidMAC will be returned. 
+func (auth *Auth) Valid() error { + t := AuthHeader + if auth.IsBewit { + t = AuthBewit + if auth.Method != "GET" && auth.Method != "HEAD" { + return ErrInvalidBewitMethod + } + if auth.ActualTimestamp.After(auth.Timestamp) { + return ErrBewitExpired + } + } else { + skew := auth.ActualTimestamp.Sub(auth.Timestamp) + if abs(skew) > MaxTimestampSkew { + return ErrTimestampSkew + } + } + if !hmac.Equal(auth.mac(t), auth.MAC) { + if auth.IsBewit && strings.HasPrefix(auth.RequestURI, "http") && len(auth.RequestURI) > 9 { + // try just the path + uri := auth.RequestURI + auth.RequestURI = "/" + strings.SplitN(auth.RequestURI[8:], "/", 2)[1] + if auth.Valid() == nil { + return nil + } + auth.RequestURI = uri + } + return ErrInvalidMAC + } + return nil +} + +func abs(d time.Duration) time.Duration { + if d < 0 { + return -d + } + return d +} + +// ValidResponse checks that a response Server-Authorization header is correct. +// +// ErrMissingServerAuth is returned if header is an empty string. ErrInvalidMAC +// is returned if the MAC is not the expected value. +func (auth *Auth) ValidResponse(header string) error { + if header == "" { + return ErrMissingServerAuth + } + err := auth.ParseHeader(header, AuthResponse) + if err != nil { + return err + } + if !hmac.Equal(auth.mac(AuthResponse), auth.MAC) { + return ErrInvalidMAC + } + return nil +} + +// PayloadHash initializes a hash for body validation. To validate a request or +// response body, call PayloadHash with contentType set to the body Content-Type +// with all parameters and prefix/suffix whitespace stripped, write the entire +// body to the returned hash, and then validate the hash with ValidHash. +func (auth *Auth) PayloadHash(contentType string) hash.Hash { + h := auth.Credentials.Hash() + h.Write([]byte("hawk." + headerVersion + ".payload\n" + contentType + "\n")) + return h +} + +// ValidHash writes the final newline to h and checks if it matches auth.Hash. 
+// Bewit creates an encoded request bewit parameter based on the auth. 
+func (auth *Auth) Bewit() string { + auth.Method = "GET" + auth.Nonce = "" + return strings.TrimRight(base64.URLEncoding.EncodeToString([]byte(auth.Credentials.ID+`\`+ + strconv.FormatInt(auth.Timestamp.Unix(), 10)+`\`+ + base64.StdEncoding.EncodeToString(auth.mac(AuthBewit))+`\`+ + auth.Ext)), "=") +} + +// NormalizedString builds the string that will be HMACed to create a request +// MAC. +func (auth *Auth) NormalizedString(t AuthType) string { + str := "hawk." + headerVersion + "." + t.String() + "\n" + + strconv.FormatInt(auth.Timestamp.Unix(), 10) + "\n" + + auth.Nonce + "\n" + + auth.Method + "\n" + + auth.RequestURI + "\n" + + auth.Host + "\n" + + auth.Port + "\n" + + base64.StdEncoding.EncodeToString(auth.Hash) + "\n" + + auth.Ext + "\n" + + if auth.Credentials.App != "" { + str += auth.Credentials.App + "\n" + str += auth.Credentials.Delegate + "\n" + } + + return str +} + +func (auth *Auth) mac(t AuthType) []byte { + mac := auth.Credentials.MAC() + mac.Write([]byte(auth.NormalizedString(t))) + return mac.Sum(nil) +} + +func (auth *Auth) tsMac(ts string) []byte { + mac := auth.Credentials.MAC() + mac.Write([]byte("hawk." + headerVersion + ".ts\n" + ts + "\n")) + return mac.Sum(nil) +} + +// StaleTimestampHeader builds a signed WWW-Authenticate response header for use +// when Valid returns ErrTimestampSkew. +func (auth *Auth) StaleTimestampHeader() string { + ts := strconv.FormatInt(Now().Unix(), 10) + return `Hawk ts="` + ts + + `", tsm="` + base64.StdEncoding.EncodeToString(auth.tsMac(ts)) + + `", error="Stale timestamp"` +} + +var tsHeaderRegex = regexp.MustCompile(`(ts|tsm|error)="([ !#-\[\]-~]+)"`) // character class is ASCII printable [\x20-\x7E] without \ and " + +// UpdateOffset parses a signed WWW-Authenticate response header containing +// a stale timestamp error and updates auth.Timestamp with an adjusted +// timestamp. 
+func (auth *Auth) UpdateOffset(header string) (time.Duration, error) { + if len(header) < 4 || strings.ToLower(header[:4]) != "hawk" { + return 0, AuthFormatError{"scheme", "must be Hawk"} + } + + matches := tsHeaderRegex.FindAllStringSubmatch(header, 3) + + var err error + var ts time.Time + var tsm []byte + + for _, match := range matches { + switch match[1] { + case "ts": + t, err := strconv.ParseInt(match[2], 10, 64) + if err != nil { + return 0, AuthFormatError{"ts", "not an integer"} + } + ts = time.Unix(t, 0) + case "tsm": + tsm, err = base64.StdEncoding.DecodeString(match[2]) + if err != nil { + return 0, AuthFormatError{"tsm", "malformed base64 encoding"} + } + } + } + + if !hmac.Equal(tsm, auth.tsMac(strconv.FormatInt(ts.Unix(), 10))) { + return 0, ErrInvalidMAC + } + + offset := ts.Sub(Now()) + auth.Timestamp = ts + auth.Nonce = nonce() + return offset, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 467d1e0..60b0c26 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,15 +1,45 @@ +# github.com/cenkalti/backoff/v3 v3.0.0 +github.com/cenkalti/backoff/v3 # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew +# github.com/fatih/camelcase v1.0.0 +github.com/fatih/camelcase +# github.com/google/uuid v1.0.0 +github.com/google/uuid +# github.com/pborman/uuid v1.2.0 +github.com/pborman/uuid # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib +# github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71 +## explicit +github.com/streadway/amqp # github.com/stretchr/testify v1.1.4-0.20160524234229-8d64eb7173c7 ## explicit github.com/stretchr/testify/assert +# github.com/taskcluster/httpbackoff/v3 v3.0.0 +github.com/taskcluster/httpbackoff/v3 +# github.com/taskcluster/jsonschema2go v1.0.0 +github.com/taskcluster/jsonschema2go/text +# github.com/taskcluster/pulse-go v1.0.0 +## explicit +github.com/taskcluster/pulse-go/pulse +# github.com/taskcluster/slugid-go v1.1.0 
+github.com/taskcluster/slugid-go/slugid +# github.com/taskcluster/taskcluster-lib-urls v12.0.0+incompatible +github.com/taskcluster/taskcluster-lib-urls +# github.com/taskcluster/taskcluster/clients/client-go/v23 v23.0.0 +## explicit +github.com/taskcluster/taskcluster/clients/client-go/v23 +github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents +# github.com/tent/hawk-go v0.0.0-20161026210932-d341ea318957 +github.com/tent/hawk-go # github.com/urfave/cli v1.17.1-0.20160608151511-fa949b48f384 ## explicit github.com/urfave/cli # go.mozilla.org/mozlog v0.0.0-20160610165107-cd74695caf44 ## explicit go.mozilla.org/mozlog +# launchpad.net/gocheck v0.0.0-20140225173054-000000000087 +## explicit