diff --git a/.travis.yml b/.travis.yml index 676c4e30..3738b1c9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,2 +1,5 @@ --- language: go + +go: + - 1.5.1 diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 842f36bf..c4ead05f 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,6 +1,7 @@ { "ImportPath": "github.com/AcalephStorage/consul-alerts", "GoVersion": "go1.5.1", + "GodepVersion": "v60", "Packages": [ "./..." ], @@ -11,9 +12,104 @@ "Rev": "e3eccfaeb58b95c9ce22aea6867562f07a0d68c7" }, { - "ImportPath": "github.com/cihub/seelog", - "Comment": "v2.6-1-g3549fbb", - "Rev": "3549fbb0ea07f15a54dba3d16ccb22785a813d30" + "ImportPath": "github.com/aws/aws-sdk-go/aws", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/client", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/defaults", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": 
"github.com/aws/aws-sdk-go/aws/ec2metadata", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/request", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/session", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/endpoints", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/sns", + "Comment": "v1.1.12", + "Rev": "4da0bec8953a0a540f391930a946917b12a95671" }, { "ImportPath": "github.com/darkcrux/gopherduty", @@ -28,6 +124,11 @@ "ImportPath": "github.com/franela/goreq", "Rev": "41f8ae0dfac5ef5d94708e02dfc19fab3e66bbf4" }, + { + "ImportPath": "github.com/go-ini/ini", + "Comment": "v1.10.1", + "Rev": "776aa739ce9373377cd16f526cdf06cb4c89b40f" + }, { "ImportPath": "github.com/google/go-querystring/query", "Rev": 
"547ef5ac979778feb2f760cdb5f4eae1a2207b86" @@ -42,6 +143,11 @@ "Comment": "v0.8.5", "Rev": "9485e99796c53635fbf179b3e973c363937de00b" }, + { + "ImportPath": "github.com/jmespath/go-jmespath", + "Comment": "0.2.2-12-g0b12d6b", + "Rev": "0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74" + }, { "ImportPath": "github.com/opsgenie/opsgenie-go-sdk/alerts", "Rev": "89e68948bffac4a7d62d1b8fc1ab5ee99d8a3cd8" @@ -58,10 +164,6 @@ "ImportPath": "github.com/opsgenie/opsgenie-go-sdk/integration", "Rev": "89e68948bffac4a7d62d1b8fc1ab5ee99d8a3cd8" }, - { - "ImportPath": "github.com/opsgenie/opsgenie-go-sdk/logging", - "Rev": "89e68948bffac4a7d62d1b8fc1ab5ee99d8a3cd8" - }, { "ImportPath": "github.com/opsgenie/opsgenie-go-sdk/policy", "Rev": "89e68948bffac4a7d62d1b8fc1ab5ee99d8a3cd8" diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/LICENSE.txt b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/NOTICE.txt b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 00000000..5f14d116 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 00000000..e50771f8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,145 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. 
+// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occured in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occured in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. 
+// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. 
+ RequestID() string +} + +// NewRequestFailure returns a new request error wrapper for the given Error +// provided. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 00000000..e2d333b8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,194 @@ +package awserr + +import "fmt" + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. 
+func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occured", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. 
+type requestError struct { + awsError + statusCode int + requestID string +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. 
+func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 00000000..8429470b --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,100 @@ +package awsutil + +import ( + "io" + "reflect" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. 
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + dst.Set(reflect.New(e)) + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 00000000..59fa4a55 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. 
+// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type the are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 00000000..3208ab79 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,222 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. 
+ value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, value := range values { + value := reflect.Indirect(value) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
// ValuesAtPath searches i with the JMESPath expression in path and
// normalizes the result to a flat []interface{}. It returns (nil, nil)
// when nothing matched, and the search error when the expression fails.
func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
	result, err := jmespath.Search(path, i)
	if err != nil {
		return nil, err
	}

	v := reflect.ValueOf(result)
	// No match, or a typed-nil pointer result: report "no values".
	if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
		return nil, nil
	}
	// Already the canonical shape; return as-is (err is nil at this point).
	if s, ok := result.([]interface{}); ok {
		return s, err
	}
	// An empty map is treated the same as no match.
	if v.Kind() == reflect.Map && v.Len() == 0 {
		return nil, nil
	}
	// Any other slice kind is boxed element-by-element into []interface{}.
	if v.Kind() == reflect.Slice {
		out := make([]interface{}, v.Len())
		for i := 0; i < v.Len(); i++ {
			out[i] = v.Index(i).Interface()
		}
		return out, nil
	}

	// Scalar result: wrap it in a single-element slice.
	return []interface{}{result}, nil
}

// SetValueAtPath sets a value at the case insensitive lexical path inside
// of a structure.
func SetValueAtPath(i interface{}, path string, v interface{}) {
	// createPath=true allocates intermediate nil pointers along the path;
	// the final argument (nilTerm) makes a nil v zero out the terminal
	// pointer itself rather than the value it points at.
	if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
		for _, rval := range rvals {
			// Skip targets that are still nil pointers; setValue would
			// have nothing addressable to write into.
			if rval.Kind() == reflect.Ptr && rval.IsNil() {
				continue
			}
			setValue(rval, v)
		}
	}
}

// setValue assigns src to the reflect value dstVal, dereferencing each side
// one pointer level, and zeroing dstVal when src is a literal nil.
func setValue(dstVal reflect.Value, src interface{}) {
	if dstVal.Kind() == reflect.Ptr {
		dstVal = reflect.Indirect(dstVal)
	}
	srcVal := reflect.ValueOf(src)

	if !srcVal.IsValid() { // src is literal nil
		if dstVal.CanAddr() {
			// NOTE(review): this branch is currently a no-op — the Addr
			// conversion below is commented out upstream.
			// Convert to pointer so that pointer's value can be nil'ed
			// dstVal = dstVal.Addr()
		}
		dstVal.Set(reflect.Zero(dstVal.Type()))

	} else if srcVal.Kind() == reflect.Ptr {
		if srcVal.IsNil() {
			// A typed-nil pointer source zeroes the destination.
			srcVal = reflect.Zero(dstVal.Type())
		} else {
			srcVal = reflect.ValueOf(src).Elem()
		}
		dstVal.Set(srcVal)
	} else {
		dstVal.Set(srcVal)
	}

}
string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. +func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + 
"}") + default: + if !v.IsValid() { + fmt.Fprint(buf, "") + return + } + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 00000000..b6432f1a --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,89 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. +func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + 
buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 00000000..2fa7dd57 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,120 @@ +package client + +import ( + "fmt" + "io/ioutil" + "net/http/httputil" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint, SigningRegion string +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. 
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers, + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 3 + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFront(logRequest) + c.Handlers.Send.PushBack(logResponse) +} + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. 
+ r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +func logResponse(r *request.Request) { + var msg = "no response data" + if r.HTTPResponse != nil { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) + msg = string(dumpedBody) + } else if r.Error != nil { + msg = r.Error.Error() + } + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 00000000..16ddf4cc --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,50 @@ +package client + +import ( + "math/rand" + "time" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, implement the +// request.Retryer interface or create a structure type that composes this +// struct and override the specific methods. For example, to override only +// the MaxRetries method: +// +// type retryer struct { +// service.DefaultRetryer +// } +// +// // This implementation always has 100 max retries +// func (d retryer) MaxRetries() uint { return 100 } +type DefaultRetryer struct { + NumMaxRetries int +} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API request. 
// MaxRetries returns the maximum number of retries the client will make
// for a single request before giving up.
func (d DefaultRetryer) MaxRetries() int {
	return d.NumMaxRetries
}

// RetryRules returns the delay duration before retrying this request again
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
	// Set the upper limit of delay in retrying at ~five minutes
	// (the exponent is capped at 13).
	retryCount := r.RetryCount
	if retryCount > 13 {
		retryCount = 13
	}

	// Exponential backoff with jitter: a 30-59ms base doubled per retry.
	delay := (1 << uint(retryCount)) * (rand.Intn(30) + 30)
	return time.Duration(delay) * time.Millisecond
}

// ShouldRetry returns if the request should be retried.
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
	// NOTE(review): assumes r.HTTPResponse is non-nil here — the core send
	// handler installs a placeholder response on transport errors; confirm
	// no other call path reaches this with a nil response.
	if r.HTTPResponse.StatusCode >= 500 {
		// Any 5xx response is considered retryable.
		return true
	}
	// Otherwise defer to the request's own retryable-error classification.
	return r.IsErrorRetryable()
}
+const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the {defaults.DefaultConfig} structure. +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to retreive + // credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to + // a chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. + // + // @note You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // @see http://docs.aws.amazon.com/general/latest/gr/rande.html + // AWS Regions and Endpoints + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. 
+ Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service specific + // configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the request.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for missing + // required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will + // use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // @note This configuration option is specific to the Amazon S3 service. + // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the EC2Metadata + // client to create a new http.Client. This options is only meaningful if you're not + // already using a custom HTTP client with the SDK. Enabled by default. 
+ // + // Must be set and provided to the session.New() in order to disable the EC2Metadata + // overriding the timeout for default credentials chain. + // + // Example: + // sess := session.New(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true)) + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + SleepDelay func(time.Duration) +} + +// NewConfig returns a new Config pointer that can be chained with builder methods to +// set multiple configuration values inline without using pointers. +// +// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) +// +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. 
+func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. 
+func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. 
+func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 00000000..d6a7b08d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,357 @@ +package aws + +import "time" + +// String returns a pointer to of the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to of the bool value passed in. 
func Bool(v bool) *bool {
	return &v
}

// BoolValue returns the value of the bool pointer passed in, or false when
// the pointer is nil.
func BoolValue(v *bool) bool {
	if v == nil {
		return false
	}
	return *v
}

// BoolSlice converts a slice of bool values into a slice of bool pointers.
func BoolSlice(src []bool) []*bool {
	dst := make([]*bool, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// BoolValueSlice converts a slice of bool pointers into a slice of bool
// values; nil entries become false.
func BoolValueSlice(src []*bool) []bool {
	dst := make([]bool, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// BoolMap converts a string map of bool values into a string map of bool
// pointers.
func BoolMap(src map[string]bool) map[string]*bool {
	dst := make(map[string]*bool, len(src))
	for k, v := range src {
		v := v // copy so each entry gets its own address
		dst[k] = &v
	}
	return dst
}

// BoolValueMap converts a string map of bool pointers into a string map of
// bool values; nil entries are omitted from the result.
func BoolValueMap(src map[string]*bool) map[string]bool {
	dst := make(map[string]bool, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// Int returns a pointer to the int value passed in.
func Int(v int) *int {
	return &v
}

// IntValue returns the value of the int pointer passed in or
// 0 if the pointer is nil.
func IntValue(v *int) int {
	if v == nil {
		return 0
	}
	return *v
}

// IntSlice converts a slice of int values into a slice of int pointers.
func IntSlice(src []int) []*int {
	dst := make([]*int, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// IntValueSlice converts a slice of int pointers into a slice of int
// values; nil entries become 0.
func IntValueSlice(src []*int) []int {
	dst := make([]int, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// IntMap converts a string map of int values into a string map of int
// pointers.
func IntMap(src map[string]int) map[string]*int {
	dst := make(map[string]*int, len(src))
	for k, v := range src {
		v := v // copy so each entry gets its own address
		dst[k] = &v
	}
	return dst
}

// IntValueMap converts a string map of int pointers into a string map of
// int values; nil entries are omitted from the result.
func IntValueMap(src map[string]*int) map[string]int {
	dst := make(map[string]int, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// Int64 returns a pointer to the int64 value passed in.
func Int64(v int64) *int64 {
	return &v
}

// Int64Value returns the value of the int64 pointer passed in or
// 0 if the pointer is nil.
func Int64Value(v *int64) int64 {
	if v == nil {
		return 0
	}
	return *v
}

// Int64Slice converts a slice of int64 values into a slice of int64
// pointers.
func Int64Slice(src []int64) []*int64 {
	dst := make([]*int64, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Int64ValueSlice converts a slice of int64 pointers into a slice of int64
// values; nil entries become 0.
func Int64ValueSlice(src []*int64) []int64 {
	dst := make([]int64, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// Int64Map converts a string map of int64 values into a string map of
// int64 pointers.
func Int64Map(src map[string]int64) map[string]*int64 {
	dst := make(map[string]*int64, len(src))
	for k, v := range src {
		v := v // copy so each entry gets its own address
		dst[k] = &v
	}
	return dst
}

// Int64ValueMap converts a string map of int64 pointers into a string map
// of int64 values; nil entries are omitted from the result.
func Int64ValueMap(src map[string]*int64) map[string]int64 {
	dst := make(map[string]int64, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// Float64 returns a pointer to the float64 value passed in.
func Float64(v float64) *float64 {
	return &v
}

// Float64Value returns the value of the float64 pointer passed in or
// 0 if the pointer is nil.
func Float64Value(v *float64) float64 {
	if v == nil {
		return 0
	}
	return *v
}

// Float64Slice converts a slice of float64 values into a slice of float64
// pointers.
func Float64Slice(src []float64) []*float64 {
	dst := make([]*float64, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Float64ValueSlice converts a slice of float64 pointers into a slice of
// float64 values; nil entries become 0.
func Float64ValueSlice(src []*float64) []float64 {
	dst := make([]float64, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// Float64Map converts a string map of float64 values into a string map of
// float64 pointers.
func Float64Map(src map[string]float64) map[string]*float64 {
	dst := make(map[string]*float64, len(src))
	for k, v := range src {
		v := v // copy so each entry gets its own address
		dst[k] = &v
	}
	return dst
}

// Float64ValueMap converts a string map of float64 pointers into a string
// map of float64 values; nil entries are omitted from the result.
func Float64ValueMap(src map[string]*float64) map[string]float64 {
	dst := make(map[string]float64, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// Time returns a pointer to the time.Time value passed in.
func Time(v time.Time) *time.Time {
	return &v
}

// TimeValue returns the value of the time.Time pointer passed in or
// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 00000000..70f55633 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,144 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "runtime" + "strconv" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for 
matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ := strconv.ParseInt(slength, 10, 64) + r.HTTPRequest.ContentLength = length + return + } + + var length int64 + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.BodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.BodyStart, 0) // make sure to seek back to original location + length = end - r.BodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) +}} + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { + var err error + r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) + if err != nil { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. 
+ if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other url redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + } +}} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. 
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + r.Config.SleepDelay(r.RetryDelay) + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 00000000..a9bc71bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,153 @@ +package corehandlers + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. 
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if r.ParamsFilled() { + v := validator{errors: []string{}} + v.validateAny(reflect.ValueOf(r.Params), "") + + if count := len(v.errors); count > 0 { + format := "%d validation errors:\n- %s" + msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- ")) + r.Error = awserr.New("InvalidParameter", msg, nil) + } + } +}} + +// A validator validates values. Collects validations errors which occurs. +type validator struct { + errors []string +} + +// There's no validation to be done on the contents of []byte values. Prepare +// to check validateAny arguments against that type so we can quickly skip +// them. +var byteSliceType = reflect.TypeOf([]byte(nil)) + +// validateAny will validate any struct, slice or map type. All validations +// are also performed recursively for nested types. +func (v *validator) validateAny(value reflect.Value, path string) { + value = reflect.Indirect(value) + if !value.IsValid() { + return + } + + switch value.Kind() { + case reflect.Struct: + v.validateStruct(value, path) + case reflect.Slice: + if value.Type() == byteSliceType { + // We don't need to validate the contents of []byte. + return + } + for i := 0; i < value.Len(); i++ { + v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i)) + } + case reflect.Map: + for _, n := range value.MapKeys() { + v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String())) + } + } +} + +// validateStruct will validate the struct value's fields. If the structure has +// nested types those types will be validated also. +func (v *validator) validateStruct(value reflect.Value, path string) { + prefix := "." 
+ if path == "" { + prefix = "" + } + + for i := 0; i < value.Type().NumField(); i++ { + f := value.Type().Field(i) + if strings.ToLower(f.Name[0:1]) == f.Name[0:1] { + continue + } + fvalue := value.FieldByName(f.Name) + + err := validateField(f, fvalue, validateFieldRequired, validateFieldMin) + if err != nil { + v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name)) + continue + } + + v.validateAny(fvalue, path+prefix+f.Name) + } +} + +type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error + +func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error { + for _, fn := range funcs { + if err := fn(f, fvalue); err != nil { + return err + } + } + return nil +} + +// Validates that a field has a valid value provided for required fields. +func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error { + if f.Tag.Get("required") == "" { + return nil + } + + switch fvalue.Kind() { + case reflect.Ptr, reflect.Slice, reflect.Map: + if fvalue.IsNil() { + return fmt.Errorf("missing required parameter") + } + default: + if !fvalue.IsValid() { + return fmt.Errorf("missing required parameter") + } + } + return nil +} + +// Validates that if a value is provided for a field, that value must be at +// least a minimum length. 
+func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error { + minStr := f.Tag.Get("min") + if minStr == "" { + return nil + } + min, _ := strconv.ParseInt(minStr, 10, 64) + + kind := fvalue.Kind() + if kind == reflect.Ptr { + if fvalue.IsNil() { + return nil + } + fvalue = fvalue.Elem() + } + + switch fvalue.Kind() { + case reflect.String: + if int64(fvalue.Len()) < min { + return fmt.Errorf("field too short, minimum length %d", min) + } + case reflect.Slice, reflect.Map: + if fvalue.IsNil() { + return nil + } + if int64(fvalue.Len()) < min { + return fmt.Errorf("field too short, minimum length %d", min) + } + + // TODO min can also apply to number minimum value. + + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 00000000..4dad2ae0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true + // + // @readonly + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. 
+// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// vai the environment variables. If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider +// does not return any credentials ChainProvider will return the error +// ErrNoValidProvidersFoundInChain +// +// creds := NewChainCredentials( +// []Provider{ +// &EnvProvider{}, +// &EC2RoleProvider{ +// Client: ec2metadata.New(sess), +// }, +// }) +// +// // Usage of ChainCredentials with aws.Config +// svc := ec2.New(&aws.Config{Credentials: creds}) +// +type ChainProvider struct { + Providers []Provider + curr Provider + VerboseErrors bool +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. 
+func (c *ChainProvider) Retrieve() (Value, error) { + var errs []error + for _, p := range c.Providers { + creds, err := p.Retrieve() + if err == nil { + c.curr = p + return creds, nil + } + errs = append(errs, err) + } + c.curr = nil + + var err error + err = ErrNoValidProvidersFoundInChain + if c.VerboseErrors { + err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) + } + return Value{}, err +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *ChainProvider) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 00000000..7b8ebf5f --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,223 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. 
+// +// creds := NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewCredentials(&EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "sync" + "time" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. +// +// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) +// // Access public S3 buckets. +// +// @readonly +var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. 
+type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Refresh returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. 
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + m sync.Mutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. 
+func (c *Credentials) Get() (Value, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.Lock() + defer c.m.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 00000000..81539d72 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,178 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata" +) + +// ProviderName provides a 
name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. 
Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + credsList, err := requestCredList(m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshalling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "/iam/security-credentials" + +// requestCredList requests a list of credentials from the EC2 service. 
+// If there are no credentials, or there is an error making or receiving the request +func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadata(iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New("SerializationError", + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. 
+ return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 00000000..3758adbb --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,77 @@ +package credentials + +import ( + "os" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + // + // @readonly + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + // + // @readonly + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. 
+func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 00000000..7fc91d9d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 00000000..53f681ed --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,151 @@ +package credentials + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/go-ini/ini" + + 
"github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + // + // @readonly + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. 
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (Value, error) { + config, err := ini.Load(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) + } + + id, err := iniProfile.GetKey("aws_access_key_id") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + err) + } + + secret, err := iniProfile.GetKey("aws_secret_access_key") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.Key("aws_session_token") + + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + ProviderName: 
SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if p.Filename == "" { + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { + return p.Filename, nil + } + + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = filepath.Join(homeDir, ".aws", "credentials") + } + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 00000000..f0c94aa4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,48 @@ +package credentials + +import ( + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + // + // @readonly + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set pragmatically, +// and will never expire. 
+type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty + } + + s.Value.ProviderName = StaticProviderName + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expired. +func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 00000000..a25ca87f --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,97 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly, but session.Session +// instead. This package is useful when you need to reset the defaults +// of a session or service client to the SDK defaults before setting +// additional parameters. 
+package defaults + +import ( + "net/http" + "os" + "time" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Defaults provides a collection of default values for SDK clients. +type Defaults struct { + Config *aws.Config + Handlers request.Handlers +} + +// Get returns the SDK's default values with Config and handlers pre-configured. +func Get() Defaults { + cfg := Config() + handlers := Handlers() + cfg.Credentials = CredChain(cfg, handlers) + + return Defaults{ + Config: cfg, + Handlers: handlers, + } +} + +// Config returns the default configuration without credentials. +// To retrieve a config with credentials also included use +// `defaults.Get().Config` instead. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the configuration of an +// existing service client or session. +func Config() *aws.Config { + return aws.NewConfig(). + WithCredentials(credentials.AnonymousCredentials). + WithRegion(os.Getenv("AWS_REGION")). + WithHTTPClient(http.DefaultClient). + WithMaxRetries(aws.UseServiceDefaultRetries). + WithLogger(aws.NewDefaultLogger()). + WithLogLevel(aws.LogOff). + WithSleepDelay(time.Sleep) +} + +// Handlers returns the default request handlers. 
+// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the request handlers of an +// existing service client or session. +func Handlers() request.Handlers { + var handlers request.Handlers + + handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.AfterEachFn = request.HandlerListStopOnError + handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.SendHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) + + return handlers +} + +// CredChain returns the default credential chain. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the credentials of an +// existing service client or session's Config. +func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { + endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true) + + return credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion), + ExpiryWindow: 5 * time.Minute, + }, + }}) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go new file mode 100644 index 00000000..a7efa1bf --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -0,0 +1,105 @@ +package ec2metadata + +import ( + "encoding/json" + "path" + "strings" + "time" + + 
"github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" +) + +// GetMetadata uses the path provided to request information from the EC2 +// instance metdata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadata(p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "meta-data", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + return output.Content, req.Send() +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + return output.Content, req.Send() +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. 
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicData("instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2RoleRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("SerializationError", + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + resp, err := c.GetMetadata("placement/availability-zone") + if err != nil { + return "", err + } + + // returns region without the suffix. Eg: us-west-2a becomes us-west-2 + return resp[:len(resp)-1], nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. 
+func (c *EC2Metadata) Available() bool { + if _, err := c.GetMetadata("instance-id"); err != nil { + return false + } + + return true +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshalling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 00000000..9c1a0911 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,124 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. +package ec2metadata + +import ( + "bytes" + "errors" + "io" + "net/http" + "time" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" +) + +// ServiceName is the name of the service. 
+const ServiceName = "ec2metadata" + +// A EC2Metadata is an EC2 Metadata service Client. +type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// +// Example: +// // Create a EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { + c := p.ClientConfig(ServiceName, cfgs...) + return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// NewClient returns a new EC2Metadata client. Should be used to create +// a client when not using a session. Generally using just New with a session +// is preferred. +// +// If an unmodified HTTP client is provided from the stdlib default, or no client +// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. +// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. +func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { + if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { + // If the http client is unmodified and this feature is not disabled + // set custom timeouts for EC2Metadata requests. + cfg.HTTPClient = &http.Client{ + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. 
+ Timeout: 5 * time.Second, + } + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + svc.Handlers.Unmarshal.PushBack(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b := &bytes.Buffer{} + if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b := &bytes.Buffer{} + if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + return + } + + // Response body format is not consistent between metadata endpoints. 
+ // Grab the error message as a string and include that as the source error + r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) +} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 00000000..9348ddec --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,17 @@ +package aws + +import "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + // + // @readonly + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + // + // @readonly + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 00000000..db87188e --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,112 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. 
Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nill, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// to LogOff comparison. +func (l *LogLevelType) AtLeast(v LogLevelType) bool { + c := l.Value() + return c >= v +} + +const ( + // LogOff states that no logging should be performed by the SDK. This is the + // default state of the SDK, and should be use to disable all logging. + LogOff LogLevelType = iota * 0x1000 + + // LogDebug state that debug output should be logged by the SDK. This should + // be used to inspect request made and responses received. + LogDebug +) + +// Debug Logging Sub Levels +const ( + // LogDebugWithSigning states that the SDK should log request signing and + // presigning events. This should be used to log the signing details of + // requests for debugging. Will also enable LogDebug. + LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) + + // LogDebugWithHTTPBody states the SDK should log HTTP request and response + // HTTP bodys in addition to the headers and path. This should be used to + // see the body content of requests and responses made while using the SDK + // Will also enable LogDebug. + LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. 
+ LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors +) + +// A Logger is a minimalistic interface for the SDK to log messages to. Should +// be used to provide custom logging writers for the SDK to use. +type Logger interface { + Log(...interface{}) +} + +// A LoggerFunc is a convenience type to convert a function taking a variadic +// list of arguments and wrap it so the Logger interface can be used. +// +// Example: +// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { +// fmt.Fprintln(os.Stdout, args...) +// })}) +type LoggerFunc func(...interface{}) + +// Log calls the wrapped function with the arguments provided +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) +} + +// NewDefaultLogger returns a Logger which will write log messages to stdout, and +// use same formatting runes as the stdlib log.Logger +func NewDefaultLogger() Logger { + return &defaultLogger{ + logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +// A defaultLogger provides a minimalistic logger satisfying the Logger interface. +type defaultLogger struct { + logger *log.Logger +} + +// Log logs the parameters to the stdlib logger. See log.Println. +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 00000000..5279c19c --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,187 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. 
+type Handlers struct { + Validate HandlerList + Build HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList +} + +// Copy returns of this handler's lists. +func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + } +} + +// Clear removes callback functions for all handlers +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() +} + +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. +type HandlerListRunItem struct { + Index int + Handler NamedHandler + Request *Request +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler + + // Called after each request handler in the list is called. If set + // and the func returns true the HandlerList will continue to iterate + // over the request handlers. If false is returned the HandlerList + // will stop iterating. + // + // Should be used if extra logic to be performed between each handler + // in the list. This can be used to terminate a list's iteration + // based on a condition such as error like, HandlerListStopOnError. + // Or for logging like HandlerListLogItem. + AfterEachFn func(item HandlerListRunItem) bool +} + +// A NamedHandler is a struct that contains a name and function callback. 
+type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + n := HandlerList{ + AfterEachFn: l.AfterEachFn, + } + n.list = append([]NamedHandler{}, l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = []NamedHandler{} +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.list = append(l.list, NamedHandler{"__anonymous", f}) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) +} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + l.list = append(l.list, n) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + l.list = append([]NamedHandler{n}, l.list...) +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + newlist := []NamedHandler{} + for _, m := range l.list { + if m.Name != n.Name { + newlist = append(newlist, m) + } + } + l.list = newlist +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. 
+func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. 
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 00000000..da6396d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,49 @@ +package request + +import ( + "io" + "sync" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.RWMutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { + reader := &offsetReader{} + buf.Seek(offset, 0) + + reader.buf = buf + return reader +} + +// Close is a thread-safe close. Uses the write lock. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read using a read lock. +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.RLock() + defer o.lock.RUnlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. 
+func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { + o.Close() + return newOffsetReader(o.buf, offset) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 00000000..efce521d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,322 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + + built bool +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator +} + +// Paginator keeps track of pagination configuration for an API operation. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. 
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + p := operation.HTTPPath + if p == "" { + p = "/" + } + + httpReq, _ := http.NewRequest(method, "", nil) + httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p) + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: nil, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// WillRetry returns if the request's can be retried. +func (r *Request) WillRetry() bool { + return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() +} + +// ParamsFilled returns if the request's parameters have been populated +// and the parameters are valid. False is returned if no parameters are +// provided or invalid. +func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +// DataFilled returns true if the request's data for response deserialization +// target has been set and is a valid. False is returned if data is not +// set, or is invalid. +func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +// SetBufferBody will set the request's body bytes that will be sent to +// the service API. +func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +// SetStringBody sets the body of the request to be backed by a string. +func (r *Request) SetStringBody(s string) { + r.SetReaderBody(strings.NewReader(s)) +} + +// SetReaderBody will set the request's body reader. 
+func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.HTTPRequest.Body = newOffsetReader(reader, 0) + r.Body = reader +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. +func (r *Request) Presign(expireTime time.Duration) (string, error) { + r.ExpireTime = expireTime + r.NotHoist = false + r.Sign() + if r.Error != nil { + return "", r.Error + } + return r.HTTPRequest.URL.String(), nil +} + +// PresignRequest behaves just like presign, but hoists all headers and signs them. +// Also returns the signed hash back to the user +func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { + r.ExpireTime = expireTime + r.NotHoist = true + r.Sign() + if r.Error != nil { + return "", nil, r.Error + } + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +func debugLogReqError(r *Request, stage string, retrying bool, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + retryStr := "not retrying" + if retrying { + retryStr = "will retry" + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Anny additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. 
+func (r *Request) Build() error { + if !r.built { + r.Error = nil + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", false, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request retuning error if errors are encountered. +// +// Send will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + + r.Handlers.Sign.Run(r) + return r.Error +} + +// Send will send the request returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +func (r *Request) Send() error { + for { + if aws.BoolValue(r.Retryable) { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + var body io.ReadCloser + if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok { + body = reader.CloseAndCopy(r.BodyStart) + } else { + if r.Config.Logger != nil { + r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions") + } + r.Body.Seek(r.BodyStart, 0) + body = ioutil.NopCloser(r.Body) + } + + r.HTTPRequest = &http.Request{ + URL: r.HTTPRequest.URL, + Header: r.HTTPRequest.Header, + Close: r.HTTPRequest.Close, + Form: r.HTTPRequest.Form, + PostForm: r.HTTPRequest.PostForm, + Body: body, + MultipartForm: r.HTTPRequest.MultipartForm, + Host: r.HTTPRequest.Host, + Method: r.HTTPRequest.Method, + Proto: r.HTTPRequest.Proto, + ContentLength: r.HTTPRequest.ContentLength, + } + // Closing response body. 
Since we are setting a new request to send off, this + // response will get squashed and leaked. + r.HTTPResponse.Body.Close() + } + + r.Sign() + if r.Error != nil { + return r.Error + } + + r.Retryable = nil + + r.Handlers.Send.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", false, r.Error) + return r.Error + } + debugLogReqError(r, "Send Request", true, err) + continue + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.UnmarshalError.Run(r) + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Validate Response", true, err) + continue + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Unmarshal Response", true, err) + continue + } + + break + } + + return nil +} + +// AddToUserAgent adds the string to the end of the request's current user agent. 
+func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 00000000..57531adc --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,104 @@ +package request + +import ( + "reflect" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil" +) + +//type Paginater interface { +// HasNextPage() bool +// NextPage() *Request +// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error +//} + +// HasNextPage returns true if this request has more pages of data available. +func (r *Request) HasNextPage() bool { + return len(r.nextPageTokens()) > 0 +} + +// nextPageTokens returns the tokens to use when asking for the next page of +// data. 
+func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if v == false { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + v, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(v) > 0 { + tokens = append(tokens, v[0]) + tokenAdded = true + } else { + tokens = append(tokens, nil) + } + } + if !tokenAdded { + return nil + } + + return tokens +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +func (r *Request) NextPage() *Request { + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. 
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 00000000..971ec935 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,82 @@ +package request + +import ( + "time" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the service.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. +type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. 
+var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once + "TooManyRequestsException": {}, // Lambda functions +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +func (r *Request) IsErrorRetryable() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeRetryable(err.Code()) + } + } + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +func (r *Request) IsErrorExpired() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeExpiredCreds(err.Code()) + } + } + return false +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 00000000..4bb19219 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,120 @@ +// Package session provides a way to create service clients with shared configuration +// and handlers. 
+// +// Generally this package should be used instead of the `defaults` package. +// +// A session should be used to share configurations and request handlers between multiple +// service clients. When service clients need specific configuration aws.Config can be +// used to provide additional configuration directly to the service client. +package session + +import ( + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the session concurrently. +type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the handlers merging in the provided Configs +// on top of the SDK's default configurations. Once the session is created it +// can be mutated to modify Configs or Handlers. The session is safe to be read +// concurrently, but it should not be written to concurrently. +// +// Example: +// // Create a session with the default config and request handlers. 
+// sess := session.New() +// +// // Create a session with a custom region +// sess := session.New(&aws.Config{Region: aws.String("us-east-1")}) +// +// // Create a session, and add additional handlers for all service +// // clients created with the session to inherit. Adds logging handler. +// sess := session.New() +// sess.Handlers.Send.PushFront(func(r *request.Request) { +// // Log every request made and its payload +// logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params) +// }) +// +// // Create a S3 client instance from a session +// sess := session.New() +// svc := s3.New(sess) +func New(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + return s +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current session, coping the config +// and handlers. If any additional configs are provided they will be merged +// on top of the session's copied config. +// +// Example: +// // Create a copy of the current session, configured for the us-west-2 region. 
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + + expLen := pos + int64(len(p)) + if int64(len(b.buf)) < expLen { + newBuf := make([]byte, expLen) + copy(newBuf, b.buf) + b.buf = newBuf + } + copy(b.buf[pos:], p) + return len(p), nil +} + +// Bytes returns a slice of bytes written to the buffer. 
+func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf[:len(b.buf):len(b.buf)] +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 00000000..3dd51181 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.1.12" diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go new file mode 100644 index 00000000..2b279e65 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go @@ -0,0 +1,65 @@ +// Package endpoints validates regional endpoints for services. +package endpoints + +//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go +//go:generate gofmt -s -w endpoints_map.go + +import ( + "fmt" + "regexp" + "strings" +) + +// NormalizeEndpoint takes and endpoint and service API information to return a +// normalized endpoint and signing region. If the endpoint is not an empty string +// the service name and region will be used to look up the service's API endpoint. +// If the endpoint is provided the scheme will be added if it is not present. +func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) { + if endpoint == "" { + return EndpointForRegion(serviceName, region, disableSSL) + } + + return AddScheme(endpoint, disableSSL), "" +} + +// EndpointForRegion returns an endpoint and its signing region for a service and region. 
+// if the service and region pair are not found endpoint and signingRegion will be empty. +func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) { + derivedKeys := []string{ + region + "/" + svcName, + region + "/*", + "*/" + svcName, + "*/*", + } + + for _, key := range derivedKeys { + if val, ok := endpointsMap.Endpoints[key]; ok { + ep := val.Endpoint + ep = strings.Replace(ep, "{region}", region, -1) + ep = strings.Replace(ep, "{service}", svcName, -1) + + endpoint = ep + signingRegion = val.SigningRegion + break + } + } + + return AddScheme(endpoint, disableSSL), signingRegion +} + +// Regular expression to determine if the endpoint string is prefixed with a scheme. +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS. +func AddScheme(endpoint string, disableSSL bool) string { + if endpoint != "" && !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json new file mode 100644 index 00000000..0cb6917b --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json @@ -0,0 +1,70 @@ +{ + "version": 2, + "endpoints": { + "*/*": { + "endpoint": "{service}.{region}.amazonaws.com" + }, + "cn-north-1/*": { + "endpoint": "{service}.{region}.amazonaws.com.cn", + "signatureVersion": "v4" + }, + "us-gov-west-1/iam": { + "endpoint": "iam.us-gov.amazonaws.com" + }, + "us-gov-west-1/sts": { + "endpoint": "sts.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "*/cloudfront": { + "endpoint": 
"cloudfront.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/cloudsearchdomain": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/data.iot": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/ec2metadata": { + "endpoint": "http://169.254.169.254/latest", + "signingRegion": "us-east-1" + }, + "*/iam": { + "endpoint": "iam.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/importexport": { + "endpoint": "importexport.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/route53": { + "endpoint": "route53.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/sts": { + "endpoint": "sts.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/waf": { + "endpoint": "waf.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/sdb": { + "endpoint": "sdb.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-east-1/s3": { + "endpoint": "s3.amazonaws.com" + }, + "eu-central-1/s3": { + "endpoint": "{service}.{region}.amazonaws.com" + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go new file mode 100644 index 00000000..6183dcd3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go @@ -0,0 +1,83 @@ +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: 2, + Endpoints: map[string]endpointEntry{ + "*/*": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "*/cloudfront": { + Endpoint: "cloudfront.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/cloudsearchdomain": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/data.iot": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + SigningRegion: "us-east-1", + }, + "*/iam": { + Endpoint: "iam.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/importexport": { + Endpoint: "importexport.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/route53": { + Endpoint: "route53.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "*/sts": { + Endpoint: "sts.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/waf": { + Endpoint: "waf.amazonaws.com", + SigningRegion: "us-east-1", + }, + "cn-north-1/*": { + Endpoint: "{service}.{region}.amazonaws.com.cn", + }, + "eu-central-1/s3": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "us-east-1/s3": { + Endpoint: "s3.amazonaws.com", + }, + "us-east-1/sdb": { + Endpoint: "sdb.amazonaws.com", + SigningRegion: "us-east-1", + }, + "us-gov-west-1/iam": { + Endpoint: "iam.us-gov.amazonaws.com", + }, + "us-gov-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-gov-west-1/sts": { + Endpoint: "sts.us-gov-west-1.amazonaws.com", + }, + }, +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go new file mode 100644 index 00000000..53831dff --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go @@ -0,0 
+1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with a Idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. +func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an Idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. +func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with a Idempotency Token. +// Given that the value can be set. Will panic if value is not setable. 
+func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 00000000..b3420901 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialisation of AWS query requests, and responses. +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. 
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 00000000..dc8cf54d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,230 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. 
+func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".member" + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." 
+ kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 00000000..f6b9aee7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,35 @@ +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" + 
"github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 00000000..1e7034e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,66 @@ +package query + +import ( + "encoding/xml" + "io/ioutil" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type xmlServiceUnavailableResponse struct { + XMLName xml.Name 
`xml:"ServiceUnavailableException"` +} + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +// UnmarshalError unmarshals an error response for an AWS Query service. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) + return + } + + // First check for specific error + resp := xmlErrorResponse{} + decodeErr := xml.Unmarshal(bodyBytes, &resp) + if decodeErr == nil { + reqID := resp.RequestID + if reqID == "" { + reqID = r.RequestID + } + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) + return + } + + // Check for unhandled error + servUnavailResp := xmlServiceUnavailableResponse{} + unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) + if unavailErr == nil { + r.Error = awserr.NewRequestFailure( + awserr.New("ServiceUnavailableException", "service is unavailable", nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Failed to retrieve any error message from the response body + r.Error = awserr.New("SerializationError", + "failed to decode query XML error response", decodeErr) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 00000000..0e6c3029 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,256 @@ +// Package rest provides RESTful serialization of AWS requests and responses. 
+package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. 
+func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value) { + query := r.HTTPRequest.URL.Query() + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if m.Kind() == reflect.Ptr { + m = m.Elem() + } + if !m.IsValid() { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name) + case "querystring": + err = buildQueryString(query, m, name) + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string) error { + str, err := 
convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key)) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string) error { + value, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + uri := u.Path + uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) + uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) + u.Path = uri + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func updatePath(url *url.URL, urlPath string) { + scheme, query := url.Scheme, url.RawQuery + + hasSlash := strings.HasSuffix(urlPath, "/") + + // clean up path + urlPath = path.Clean(urlPath) + if hasSlash && !strings.HasSuffix(urlPath, "/") { + urlPath += "/" + } + + // get formatted URL minus scheme so we can 
build this into Opaque + url.Scheme, url.Path, url.RawQuery = "", "", "" + s := url.String() + url.Scheme = scheme + url.RawQuery = query + + // build opaque URI + url.Opaque = s + urlPath +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 00000000..4366de2e --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. 
+func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 00000000..aecac320 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,198 @@ +package rest + +import ( + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// 
UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadSeeker": + payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) + case "aws.ReadSeekCloser", "io.ReadCloser": + 
payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + defer r.HTTPResponse.Body.Close() + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string) error { + if !v.IsValid() || (header == "" && v.Elem().Kind() != 
reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 00000000..9f222165 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,21 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. 
+func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 00000000..252df82e --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,293 @@ +// Package xmlutil provides XML serialisation of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. +// Error will be returned if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, false) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. 
If +// type is not provided reflect will be used to determine the value's type. +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested +// types are converted to XMLNodes also. 
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + fieldAdded := false + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + + fieldAdded = true + } + + if fieldAdded { // only append this child if we have one ore more valid members + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. 
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. 
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + str = converted.UTC().Format(ISO8601UTC) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 00000000..49f291a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,260 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. 
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, _ := XMLToStruct(d, nil) + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err := parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. 
// parseStruct fills the struct behind r from node. A nil pointer target is
// allocated first. Fields are matched by their "locationName"/"locationNameList"
// tags (falling back to the Go field name), first against child elements and
// then against XML attributes of node.
func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
	t := r.Type()
	if r.Kind() == reflect.Ptr {
		if r.IsNil() { // create the structure if it's nil
			s := reflect.New(r.Type().Elem())
			r.Set(s)
			r = s
		}

		r = r.Elem()
		t = t.Elem()
	}

	// unwrap any payloads: delegate the whole node to the named payload field
	if payload := tag.Get("payload"); payload != "" {
		field, _ := t.FieldByName(payload)
		return parseStruct(r.FieldByName(payload), node, field.Tag)
	}

	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		if c := field.Name[0:1]; strings.ToLower(c) == c {
			continue // ignore unexported fields
		}

		// figure out what this field is called in the XML document
		name := field.Name
		if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
			name = field.Tag.Get("locationNameList")
		} else if locName := field.Tag.Get("locationName"); locName != "" {
			name = locName
		}

		// try to find the field by name in elements
		elems := node.Children[name]

		if elems == nil { // try to find the field in attributes
			for _, a := range node.Attr {
				if name == a.Name.Local {
					// turn this into a text node for de-serializing
					elems = []*XMLNode{{Text: a.Value}}
				}
			}
		}

		member := r.FieldByName(field.Name)
		for _, elem := range elems {
			err := parse(member, elem, field.Tag)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// parseList deserializes a list of values from an XML node. Each list entry
// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from a XML node. 
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScaller deserializes an XMLNode value into a concrete type based on the +// interface type of r. +// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. +func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + t, err := time.Parse(ISO8601UTC, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 00000000..72c198a9 --- /dev/null 
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,105 @@ +package xmlutil + +import ( + "encoding/xml" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. +func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if tok == nil || err == io.EOF { + break + } + if err != nil { + return out, err + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + if e != nil { + return out, e + } + node.Name = typed.Name + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + } + } + return out, nil +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. 
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go new file mode 100644 index 00000000..244c86da --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) 
IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } + return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go new file mode 100644 index 00000000..506587c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go @@ -0,0 +1,438 @@ +// Package v4 implements signing for AWS V4 signer +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" +) + +var ignoredHeaders = 
rules{ + blacklist{ + mapRule{ + "Content-Length": struct{}{}, + "User-Agent": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. 
+var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +type signer struct { + Request *http.Request + Time time.Time + ExpireTime time.Duration + ServiceName string + Region string + CredValues credentials.Value + Credentials *credentials.Credentials + Query url.Values + Body io.ReadSeeker + Debug aws.LogLevelType + Logger aws.Logger + + isPresign bool + formattedTime string + formattedShortTime string + + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string + notHoist bool + signedHeaderVals http.Header +} + +// Sign requests with signature version 4. +// +// Will sign the requests with the service config's Credentials object +// Signing is skipped if the credentials is the credentials.AnonymousCredentials +// object. +func Sign(req *request.Request) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. 
+ if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + s := signer{ + Request: req.HTTPRequest, + Time: req.Time, + ExpireTime: req.ExpireTime, + Query: req.HTTPRequest.URL.Query(), + Body: req.Body, + ServiceName: name, + Region: region, + Credentials: req.Config.Credentials, + Debug: req.Config.LogLevel.Value(), + Logger: req.Config.Logger, + notHoist: req.NotHoist, + } + + req.Error = s.sign() + req.SignedHeaderVals = s.signedHeaderVals +} + +func (v4 *signer) sign() error { + if v4.ExpireTime != 0 { + v4.isPresign = true + } + + if v4.isRequestSigned() { + if !v4.Credentials.IsExpired() { + // If the request is already signed, and the credentials have not + // expired yet ignore the signing request. + return nil + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + if v4.isPresign { + v4.removePresign() + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. 
+ v4.Request.URL.RawQuery = v4.Query.Encode() + } + } + + var err error + v4.CredValues, err = v4.Credentials.Get() + if err != nil { + return err + } + + if v4.isPresign { + v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if v4.CredValues.SessionToken != "" { + v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) + } else { + v4.Query.Del("X-Amz-Security-Token") + } + } else if v4.CredValues.SessionToken != "" { + v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) + } + + v4.build() + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo() + } + + return nil +} + +const logSignInfoMsg = `DEBUG: Request Signiture: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *signer) logSigningInfo() { + signedURLMsg := "" + if v4.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (v4 *signer) build() { + + v4.buildTime() // no depends + v4.buildCredentialString() // no depends + + unsignedHeaders := v4.Request.Header + if v4.isPresign { + if !v4.notHoist { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + v4.Query[k] = urlValues[k] + } + } + } + + v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + v4.buildCanonicalString() // depends on canon headers / signed headers + v4.buildStringToSign() // depends on canon string + v4.buildSignature() // depends on string to sign + + if v4.isPresign { + v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + 
v4.CredValues.AccessKeyID + "/" + v4.credentialString, + "SignedHeaders=" + v4.signedHeaders, + "Signature=" + v4.signature, + } + v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (v4 *signer) buildTime() { + v4.formattedTime = v4.Time.UTC().Format(timeFormat) + v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) + + if v4.isPresign { + duration := int64(v4.ExpireTime / time.Second) + v4.Query.Set("X-Amz-Date", v4.formattedTime) + v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) + } +} + +func (v4 *signer) buildCredentialString() { + v4.credentialString = strings.Join([]string{ + v4.formattedShortTime, + v4.Region, + v4.ServiceName, + "aws4_request", + }, "/") + + if v4.isPresign { + v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + + lowerCaseKey := strings.ToLower(k) + headers = append(headers, lowerCaseKey) + + if v4.signedHeaderVals == nil { + v4.signedHeaderVals = make(http.Header) + } + v4.signedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + v4.signedHeaders = strings.Join(headers, ";") + + if v4.isPresign { + v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + v4.Request.URL.Host + } else { + headerValues[i] = k 
+ ":" + + strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",") + } + } + + v4.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (v4 *signer) buildCanonicalString() { + v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) + uri := v4.Request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = v4.Request.URL.Path + } + if uri == "" { + uri = "/" + } + + if v4.ServiceName != "s3" { + uri = rest.EscapePath(uri, false) + } + + v4.canonicalString = strings.Join([]string{ + v4.Request.Method, + uri, + v4.Request.URL.RawQuery, + v4.canonicalHeaders + "\n", + v4.signedHeaders, + v4.bodyDigest(), + }, "\n") +} + +func (v4 *signer) buildStringToSign() { + v4.stringToSign = strings.Join([]string{ + authHeaderPrefix, + v4.formattedTime, + v4.credentialString, + hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), + }, "\n") +} + +func (v4 *signer) buildSignature() { + secret := v4.CredValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) + region := makeHmac(date, []byte(v4.Region)) + service := makeHmac(region, []byte(v4.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(v4.stringToSign)) + v4.signature = hex.EncodeToString(signature) +} + +func (v4 *signer) bodyDigest() string { + hash := v4.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if v4.isPresign && v4.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if v4.Body == nil { + hash = hex.EncodeToString(makeSha256([]byte{})) + } else { + hash = hex.EncodeToString(makeSha256Reader(v4.Body)) + } + v4.Request.Header.Add("X-Amz-Content-Sha256", hash) + } + return hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (v4 *signer) isRequestSigned() bool { + if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { + return true + } + if 
v4.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. +func (v4 *signer) removePresign() { + v4.Query.Del("X-Amz-Algorithm") + v4.Query.Del("X-Amz-Signature") + v4.Query.Del("X-Amz-Security-Token") + v4.Query.Del("X-Amz-Date") + v4.Query.Del("X-Amz-Expires") + v4.Query.Del("X-Amz-Credential") + v4.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sns/api.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sns/api.go new file mode 100644 index 00000000..d167a956 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sns/api.go @@ -0,0 +1,2100 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sns provides a client for Amazon Simple Notification Service. +package sns + +import ( + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAddPermission = "AddPermission" + +// AddPermissionRequest generates a request for the AddPermission operation. 
func (c *SNS) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) {
	op := &request.Operation{
		Name:       opAddPermission,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &AddPermissionInput{}
	}

	req = c.newRequest(op, input, output)
	// AddPermission returns no response body: replace the default query
	// unmarshaler with one that discards the body.
	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	output = &AddPermissionOutput{}
	req.Data = output
	return
}

// Adds a statement to a topic's access control policy, granting access for
// the specified AWS accounts to the specified actions.
func (c *SNS) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, error) {
	req, out := c.AddPermissionRequest(input)
	err := req.Send()
	return out, err
}

const opConfirmSubscription = "ConfirmSubscription"

// ConfirmSubscriptionRequest generates a request for the ConfirmSubscription operation.
func (c *SNS) ConfirmSubscriptionRequest(input *ConfirmSubscriptionInput) (req *request.Request, output *ConfirmSubscriptionOutput) {
	op := &request.Operation{
		Name:       opConfirmSubscription,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &ConfirmSubscriptionInput{}
	}

	req = c.newRequest(op, input, output)
	output = &ConfirmSubscriptionOutput{}
	req.Data = output
	return
}

// Verifies an endpoint owner's intent to receive messages by validating the
// token sent to the endpoint by an earlier Subscribe action. If the token is
// valid, the action creates a new subscription and returns its Amazon Resource
// Name (ARN). This call requires an AWS signature only when the AuthenticateOnUnsubscribe
// flag is set to "true".
func (c *SNS) ConfirmSubscription(input *ConfirmSubscriptionInput) (*ConfirmSubscriptionOutput, error) {
	req, out := c.ConfirmSubscriptionRequest(input)
	err := req.Send()
	return out, err
}

const opCreatePlatformApplication = "CreatePlatformApplication"

// CreatePlatformApplicationRequest generates a request for the CreatePlatformApplication operation.
// The returned request can be sent later with req.Send(); output is bound as req.Data.
func (c *SNS) CreatePlatformApplicationRequest(input *CreatePlatformApplicationInput) (req *request.Request, output *CreatePlatformApplicationOutput) {
	op := &request.Operation{
		Name:       opCreatePlatformApplication,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreatePlatformApplicationInput{}
	}

	req = c.newRequest(op, input, output)
	output = &CreatePlatformApplicationOutput{}
	req.Data = output
	return
}

// Creates a platform application object for one of the supported push notification
// services, such as APNS and GCM, to which devices and mobile apps may register.
// You must specify PlatformPrincipal and PlatformCredential attributes when
// using the CreatePlatformApplication action. The PlatformPrincipal is received
// from the notification service. For APNS/APNS_SANDBOX, PlatformPrincipal is
// "SSL certificate". For GCM, PlatformPrincipal is not applicable. For ADM,
// PlatformPrincipal is "client id". The PlatformCredential is also received
// from the notification service. For APNS/APNS_SANDBOX, PlatformCredential
// is "private key". For GCM, PlatformCredential is "API key". For ADM, PlatformCredential
// is "client secret". The PlatformApplicationArn that is returned when using
// CreatePlatformApplication is then used as an attribute for the CreatePlatformEndpoint
// action. For more information, see Using Amazon SNS Mobile Push Notifications
// (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html).
func (c *SNS) CreatePlatformApplication(input *CreatePlatformApplicationInput) (*CreatePlatformApplicationOutput, error) {
	req, out := c.CreatePlatformApplicationRequest(input)
	err := req.Send()
	return out, err
}

const opCreatePlatformEndpoint = "CreatePlatformEndpoint"

// CreatePlatformEndpointRequest generates a request for the CreatePlatformEndpoint operation.
// The returned request can be sent later with req.Send(); output is bound as req.Data.
func (c *SNS) CreatePlatformEndpointRequest(input *CreatePlatformEndpointInput) (req *request.Request, output *CreatePlatformEndpointOutput) {
	op := &request.Operation{
		Name:       opCreatePlatformEndpoint,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreatePlatformEndpointInput{}
	}

	req = c.newRequest(op, input, output)
	output = &CreatePlatformEndpointOutput{}
	req.Data = output
	return
}

// Creates an endpoint for a device and mobile app on one of the supported push
// notification services, such as GCM and APNS. CreatePlatformEndpoint requires
// the PlatformApplicationArn that is returned from CreatePlatformApplication.
// The EndpointArn that is returned when using CreatePlatformEndpoint can then
// be used by the Publish action to send a message to a mobile app or by the
// Subscribe action for subscription to a topic. The CreatePlatformEndpoint
// action is idempotent, so if the requester already owns an endpoint with the
// same device token and attributes, that endpoint's ARN is returned without
// creating a new endpoint. For more information, see Using Amazon SNS Mobile
// Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html).
//
// When using CreatePlatformEndpoint with Baidu, two attributes must be provided:
// ChannelId and UserId. The token field must also contain the ChannelId. For
// more information, see Creating an Amazon SNS Endpoint for Baidu (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePushBaiduEndpoint.html).
func (c *SNS) CreatePlatformEndpoint(input *CreatePlatformEndpointInput) (*CreatePlatformEndpointOutput, error) {
	req, out := c.CreatePlatformEndpointRequest(input)
	err := req.Send()
	return out, err
}

const opCreateTopic = "CreateTopic"

// CreateTopicRequest generates a request for the CreateTopic operation.
// The returned request can be sent later with req.Send(); output is bound as req.Data.
func (c *SNS) CreateTopicRequest(input *CreateTopicInput) (req *request.Request, output *CreateTopicOutput) {
	op := &request.Operation{
		Name:       opCreateTopic,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateTopicInput{}
	}

	req = c.newRequest(op, input, output)
	output = &CreateTopicOutput{}
	req.Data = output
	return
}

// Creates a topic to which notifications can be published. Users can create
// at most 3000 topics. For more information, see http://aws.amazon.com/sns
// (http://aws.amazon.com/sns/). This action is idempotent, so if the requester
// already owns a topic with the specified name, that topic's ARN is returned
// without creating a new topic.
func (c *SNS) CreateTopic(input *CreateTopicInput) (*CreateTopicOutput, error) {
	req, out := c.CreateTopicRequest(input)
	err := req.Send()
	return out, err
}

const opDeleteEndpoint = "DeleteEndpoint"

// DeleteEndpointRequest generates a request for the DeleteEndpoint operation.
func (c *SNS) DeleteEndpointRequest(input *DeleteEndpointInput) (req *request.Request, output *DeleteEndpointOutput) {
	op := &request.Operation{
		Name:       opDeleteEndpoint,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteEndpointInput{}
	}

	req = c.newRequest(op, input, output)
	// DeleteEndpoint returns no response body: replace the default query
	// unmarshaler with one that discards the body.
	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	output = &DeleteEndpointOutput{}
	req.Data = output
	return
}

// Deletes the endpoint from Amazon SNS. This action is idempotent. For more
// information, see Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html).
func (c *SNS) DeleteEndpoint(input *DeleteEndpointInput) (*DeleteEndpointOutput, error) {
	req, out := c.DeleteEndpointRequest(input)
	err := req.Send()
	return out, err
}

const opDeletePlatformApplication = "DeletePlatformApplication"

// DeletePlatformApplicationRequest generates a request for the DeletePlatformApplication operation.
func (c *SNS) DeletePlatformApplicationRequest(input *DeletePlatformApplicationInput) (req *request.Request, output *DeletePlatformApplicationOutput) {
	op := &request.Operation{
		Name:       opDeletePlatformApplication,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeletePlatformApplicationInput{}
	}

	req = c.newRequest(op, input, output)
	// DeletePlatformApplication returns no response body: replace the default
	// query unmarshaler with one that discards the body.
	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	output = &DeletePlatformApplicationOutput{}
	req.Data = output
	return
}

// Deletes a platform application object for one of the supported push notification
// services, such as APNS and GCM. For more information, see Using Amazon SNS
// Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html).
func (c *SNS) DeletePlatformApplication(input *DeletePlatformApplicationInput) (*DeletePlatformApplicationOutput, error) {
	req, out := c.DeletePlatformApplicationRequest(input)
	err := req.Send()
	return out, err
}

const opDeleteTopic = "DeleteTopic"

// DeleteTopicRequest generates a request for the DeleteTopic operation.
+func (c *SNS) DeleteTopicRequest(input *DeleteTopicInput) (req *request.Request, output *DeleteTopicOutput) { + op := &request.Operation{ + Name: opDeleteTopic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTopicInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteTopicOutput{} + req.Data = output + return +} + +// Deletes a topic and all its subscriptions. Deleting a topic might prevent +// some messages previously sent to the topic from being delivered to subscribers. +// This action is idempotent, so deleting a topic that does not exist does not +// result in an error. +func (c *SNS) DeleteTopic(input *DeleteTopicInput) (*DeleteTopicOutput, error) { + req, out := c.DeleteTopicRequest(input) + err := req.Send() + return out, err +} + +const opGetEndpointAttributes = "GetEndpointAttributes" + +// GetEndpointAttributesRequest generates a request for the GetEndpointAttributes operation. +func (c *SNS) GetEndpointAttributesRequest(input *GetEndpointAttributesInput) (req *request.Request, output *GetEndpointAttributesOutput) { + op := &request.Operation{ + Name: opGetEndpointAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetEndpointAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetEndpointAttributesOutput{} + req.Data = output + return +} + +// Retrieves the endpoint attributes for a device on one of the supported push +// notification services, such as GCM and APNS. For more information, see Using +// Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). 
+func (c *SNS) GetEndpointAttributes(input *GetEndpointAttributesInput) (*GetEndpointAttributesOutput, error) { + req, out := c.GetEndpointAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetPlatformApplicationAttributes = "GetPlatformApplicationAttributes" + +// GetPlatformApplicationAttributesRequest generates a request for the GetPlatformApplicationAttributes operation. +func (c *SNS) GetPlatformApplicationAttributesRequest(input *GetPlatformApplicationAttributesInput) (req *request.Request, output *GetPlatformApplicationAttributesOutput) { + op := &request.Operation{ + Name: opGetPlatformApplicationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPlatformApplicationAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPlatformApplicationAttributesOutput{} + req.Data = output + return +} + +// Retrieves the attributes of the platform application object for the supported +// push notification services, such as APNS and GCM. For more information, see +// Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) GetPlatformApplicationAttributes(input *GetPlatformApplicationAttributesInput) (*GetPlatformApplicationAttributesOutput, error) { + req, out := c.GetPlatformApplicationAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetSubscriptionAttributes = "GetSubscriptionAttributes" + +// GetSubscriptionAttributesRequest generates a request for the GetSubscriptionAttributes operation. 
+func (c *SNS) GetSubscriptionAttributesRequest(input *GetSubscriptionAttributesInput) (req *request.Request, output *GetSubscriptionAttributesOutput) { + op := &request.Operation{ + Name: opGetSubscriptionAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSubscriptionAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSubscriptionAttributesOutput{} + req.Data = output + return +} + +// Returns all of the properties of a subscription. +func (c *SNS) GetSubscriptionAttributes(input *GetSubscriptionAttributesInput) (*GetSubscriptionAttributesOutput, error) { + req, out := c.GetSubscriptionAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetTopicAttributes = "GetTopicAttributes" + +// GetTopicAttributesRequest generates a request for the GetTopicAttributes operation. +func (c *SNS) GetTopicAttributesRequest(input *GetTopicAttributesInput) (req *request.Request, output *GetTopicAttributesOutput) { + op := &request.Operation{ + Name: opGetTopicAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTopicAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTopicAttributesOutput{} + req.Data = output + return +} + +// Returns all of the properties of a topic. Topic properties returned might +// differ based on the authorization of the user. +func (c *SNS) GetTopicAttributes(input *GetTopicAttributesInput) (*GetTopicAttributesOutput, error) { + req, out := c.GetTopicAttributesRequest(input) + err := req.Send() + return out, err +} + +const opListEndpointsByPlatformApplication = "ListEndpointsByPlatformApplication" + +// ListEndpointsByPlatformApplicationRequest generates a request for the ListEndpointsByPlatformApplication operation. 
+func (c *SNS) ListEndpointsByPlatformApplicationRequest(input *ListEndpointsByPlatformApplicationInput) (req *request.Request, output *ListEndpointsByPlatformApplicationOutput) { + op := &request.Operation{ + Name: opListEndpointsByPlatformApplication, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEndpointsByPlatformApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &ListEndpointsByPlatformApplicationOutput{} + req.Data = output + return +} + +// Lists the endpoints and endpoint attributes for devices in a supported push +// notification service, such as GCM and APNS. The results for ListEndpointsByPlatformApplication +// are paginated and return a limited list of endpoints, up to 100. If additional +// records are available after the first page results, then a NextToken string +// will be returned. To receive the next page, you call ListEndpointsByPlatformApplication +// again using the NextToken string received from the previous call. When there +// are no more records to return, NextToken will be null. For more information, +// see Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). 
+func (c *SNS) ListEndpointsByPlatformApplication(input *ListEndpointsByPlatformApplicationInput) (*ListEndpointsByPlatformApplicationOutput, error) { + req, out := c.ListEndpointsByPlatformApplicationRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListEndpointsByPlatformApplicationPages(input *ListEndpointsByPlatformApplicationInput, fn func(p *ListEndpointsByPlatformApplicationOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListEndpointsByPlatformApplicationRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListEndpointsByPlatformApplicationOutput), lastPage) + }) +} + +const opListPlatformApplications = "ListPlatformApplications" + +// ListPlatformApplicationsRequest generates a request for the ListPlatformApplications operation. +func (c *SNS) ListPlatformApplicationsRequest(input *ListPlatformApplicationsInput) (req *request.Request, output *ListPlatformApplicationsOutput) { + op := &request.Operation{ + Name: opListPlatformApplications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPlatformApplicationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPlatformApplicationsOutput{} + req.Data = output + return +} + +// Lists the platform application objects for the supported push notification +// services, such as APNS and GCM. The results for ListPlatformApplications +// are paginated and return a limited list of applications, up to 100. If additional +// records are available after the first page results, then a NextToken string +// will be returned. To receive the next page, you call ListPlatformApplications +// using the NextToken string received from the previous call. 
When there are +// no more records to return, NextToken will be null. For more information, +// see Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) ListPlatformApplications(input *ListPlatformApplicationsInput) (*ListPlatformApplicationsOutput, error) { + req, out := c.ListPlatformApplicationsRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListPlatformApplicationsPages(input *ListPlatformApplicationsInput, fn func(p *ListPlatformApplicationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPlatformApplicationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPlatformApplicationsOutput), lastPage) + }) +} + +const opListSubscriptions = "ListSubscriptions" + +// ListSubscriptionsRequest generates a request for the ListSubscriptions operation. +func (c *SNS) ListSubscriptionsRequest(input *ListSubscriptionsInput) (req *request.Request, output *ListSubscriptionsOutput) { + op := &request.Operation{ + Name: opListSubscriptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSubscriptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSubscriptionsOutput{} + req.Data = output + return +} + +// Returns a list of the requester's subscriptions. Each call returns a limited +// list of subscriptions, up to 100. If there are more subscriptions, a NextToken +// is also returned. Use the NextToken parameter in a new ListSubscriptions +// call to get further results. 
+func (c *SNS) ListSubscriptions(input *ListSubscriptionsInput) (*ListSubscriptionsOutput, error) { + req, out := c.ListSubscriptionsRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListSubscriptionsPages(input *ListSubscriptionsInput, fn func(p *ListSubscriptionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSubscriptionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSubscriptionsOutput), lastPage) + }) +} + +const opListSubscriptionsByTopic = "ListSubscriptionsByTopic" + +// ListSubscriptionsByTopicRequest generates a request for the ListSubscriptionsByTopic operation. +func (c *SNS) ListSubscriptionsByTopicRequest(input *ListSubscriptionsByTopicInput) (req *request.Request, output *ListSubscriptionsByTopicOutput) { + op := &request.Operation{ + Name: opListSubscriptionsByTopic, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSubscriptionsByTopicInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSubscriptionsByTopicOutput{} + req.Data = output + return +} + +// Returns a list of the subscriptions to a specific topic. Each call returns +// a limited list of subscriptions, up to 100. If there are more subscriptions, +// a NextToken is also returned. Use the NextToken parameter in a new ListSubscriptionsByTopic +// call to get further results. 
+func (c *SNS) ListSubscriptionsByTopic(input *ListSubscriptionsByTopicInput) (*ListSubscriptionsByTopicOutput, error) { + req, out := c.ListSubscriptionsByTopicRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListSubscriptionsByTopicPages(input *ListSubscriptionsByTopicInput, fn func(p *ListSubscriptionsByTopicOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSubscriptionsByTopicRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSubscriptionsByTopicOutput), lastPage) + }) +} + +const opListTopics = "ListTopics" + +// ListTopicsRequest generates a request for the ListTopics operation. +func (c *SNS) ListTopicsRequest(input *ListTopicsInput) (req *request.Request, output *ListTopicsOutput) { + op := &request.Operation{ + Name: opListTopics, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTopicsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTopicsOutput{} + req.Data = output + return +} + +// Returns a list of the requester's topics. Each call returns a limited list +// of topics, up to 100. If there are more topics, a NextToken is also returned. +// Use the NextToken parameter in a new ListTopics call to get further results. 
+func (c *SNS) ListTopics(input *ListTopicsInput) (*ListTopicsOutput, error) { + req, out := c.ListTopicsRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListTopicsPages(input *ListTopicsInput, fn func(p *ListTopicsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTopicsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTopicsOutput), lastPage) + }) +} + +const opPublish = "Publish" + +// PublishRequest generates a request for the Publish operation. +func (c *SNS) PublishRequest(input *PublishInput) (req *request.Request, output *PublishOutput) { + op := &request.Operation{ + Name: opPublish, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PublishInput{} + } + + req = c.newRequest(op, input, output) + output = &PublishOutput{} + req.Data = output + return +} + +// Sends a message to all of a topic's subscribed endpoints. When a messageId +// is returned, the message has been saved and Amazon SNS will attempt to deliver +// it to the topic's subscribers shortly. The format of the outgoing message +// to each subscribed endpoint depends on the notification protocol selected. +// +// To use the Publish action for sending a message to a mobile endpoint, such +// as an app on a Kindle device or mobile phone, you must specify the EndpointArn. +// The EndpointArn is returned when making a call with the CreatePlatformEndpoint +// action. The second example below shows a request and response for publishing +// to a mobile endpoint. +func (c *SNS) Publish(input *PublishInput) (*PublishOutput, error) { + req, out := c.PublishRequest(input) + err := req.Send() + return out, err +} + +const opRemovePermission = "RemovePermission" + +// RemovePermissionRequest generates a request for the RemovePermission operation. 
+func (c *SNS) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) { + op := &request.Operation{ + Name: opRemovePermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemovePermissionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemovePermissionOutput{} + req.Data = output + return +} + +// Removes a statement from a topic's access control policy. +func (c *SNS) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) { + req, out := c.RemovePermissionRequest(input) + err := req.Send() + return out, err +} + +const opSetEndpointAttributes = "SetEndpointAttributes" + +// SetEndpointAttributesRequest generates a request for the SetEndpointAttributes operation. +func (c *SNS) SetEndpointAttributesRequest(input *SetEndpointAttributesInput) (req *request.Request, output *SetEndpointAttributesOutput) { + op := &request.Operation{ + Name: opSetEndpointAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetEndpointAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetEndpointAttributesOutput{} + req.Data = output + return +} + +// Sets the attributes for an endpoint for a device on one of the supported +// push notification services, such as GCM and APNS. For more information, see +// Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). 
+func (c *SNS) SetEndpointAttributes(input *SetEndpointAttributesInput) (*SetEndpointAttributesOutput, error) { + req, out := c.SetEndpointAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSetPlatformApplicationAttributes = "SetPlatformApplicationAttributes" + +// SetPlatformApplicationAttributesRequest generates a request for the SetPlatformApplicationAttributes operation. +func (c *SNS) SetPlatformApplicationAttributesRequest(input *SetPlatformApplicationAttributesInput) (req *request.Request, output *SetPlatformApplicationAttributesOutput) { + op := &request.Operation{ + Name: opSetPlatformApplicationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetPlatformApplicationAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetPlatformApplicationAttributesOutput{} + req.Data = output + return +} + +// Sets the attributes of the platform application object for the supported +// push notification services, such as APNS and GCM. For more information, see +// Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) SetPlatformApplicationAttributes(input *SetPlatformApplicationAttributesInput) (*SetPlatformApplicationAttributesOutput, error) { + req, out := c.SetPlatformApplicationAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSetSubscriptionAttributes = "SetSubscriptionAttributes" + +// SetSubscriptionAttributesRequest generates a request for the SetSubscriptionAttributes operation. 
+func (c *SNS) SetSubscriptionAttributesRequest(input *SetSubscriptionAttributesInput) (req *request.Request, output *SetSubscriptionAttributesOutput) { + op := &request.Operation{ + Name: opSetSubscriptionAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetSubscriptionAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetSubscriptionAttributesOutput{} + req.Data = output + return +} + +// Allows a subscription owner to set an attribute of the topic to a new value. +func (c *SNS) SetSubscriptionAttributes(input *SetSubscriptionAttributesInput) (*SetSubscriptionAttributesOutput, error) { + req, out := c.SetSubscriptionAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSetTopicAttributes = "SetTopicAttributes" + +// SetTopicAttributesRequest generates a request for the SetTopicAttributes operation. +func (c *SNS) SetTopicAttributesRequest(input *SetTopicAttributesInput) (req *request.Request, output *SetTopicAttributesOutput) { + op := &request.Operation{ + Name: opSetTopicAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetTopicAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetTopicAttributesOutput{} + req.Data = output + return +} + +// Allows a topic owner to set an attribute of the topic to a new value. +func (c *SNS) SetTopicAttributes(input *SetTopicAttributesInput) (*SetTopicAttributesOutput, error) { + req, out := c.SetTopicAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSubscribe = "Subscribe" + +// SubscribeRequest generates a request for the Subscribe operation. 
+func (c *SNS) SubscribeRequest(input *SubscribeInput) (req *request.Request, output *SubscribeOutput) { + op := &request.Operation{ + Name: opSubscribe, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SubscribeInput{} + } + + req = c.newRequest(op, input, output) + output = &SubscribeOutput{} + req.Data = output + return +} + +// Prepares to subscribe an endpoint by sending the endpoint a confirmation +// message. To actually create a subscription, the endpoint owner must call +// the ConfirmSubscription action with the token from the confirmation message. +// Confirmation tokens are valid for three days. +func (c *SNS) Subscribe(input *SubscribeInput) (*SubscribeOutput, error) { + req, out := c.SubscribeRequest(input) + err := req.Send() + return out, err +} + +const opUnsubscribe = "Unsubscribe" + +// UnsubscribeRequest generates a request for the Unsubscribe operation. +func (c *SNS) UnsubscribeRequest(input *UnsubscribeInput) (req *request.Request, output *UnsubscribeOutput) { + op := &request.Operation{ + Name: opUnsubscribe, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnsubscribeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UnsubscribeOutput{} + req.Data = output + return +} + +// Deletes a subscription. If the subscription requires authentication for deletion, +// only the owner of the subscription or the topic's owner can unsubscribe, +// and an AWS signature is required. If the Unsubscribe call does not require +// authentication and the requester is not the subscription owner, a final cancellation +// message is delivered to the endpoint, so that the endpoint owner can easily +// resubscribe to the topic if the Unsubscribe request was unintended. 
+func (c *SNS) Unsubscribe(input *UnsubscribeInput) (*UnsubscribeOutput, error) { + req, out := c.UnsubscribeRequest(input) + err := req.Send() + return out, err +} + +type AddPermissionInput struct { + _ struct{} `type:"structure"` + + // The AWS account IDs of the users (principals) who will be given access to + // the specified actions. The users must have AWS accounts, but do not need + // to be signed up for this service. + AWSAccountId []*string `type:"list" required:"true"` + + // The action you want to allow for the specified principal(s). + // + // Valid values: any Amazon SNS action name. + ActionName []*string `type:"list" required:"true"` + + // A unique identifier for the new policy statement. + Label *string `type:"string" required:"true"` + + // The ARN of the topic whose access control policy you wish to modify. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddPermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPermissionInput) GoString() string { + return s.String() +} + +type AddPermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddPermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPermissionOutput) GoString() string { + return s.String() +} + +// Input for ConfirmSubscription action. +type ConfirmSubscriptionInput struct { + _ struct{} `type:"structure"` + + // Disallows unauthenticated unsubscribes of the subscription. If the value + // of this parameter is true and the request has an AWS signature, then only + // the topic owner and the subscription owner can unsubscribe the endpoint. + // The unsubscribe action requires AWS authentication. + AuthenticateOnUnsubscribe *string `type:"string"` + + // Short-lived token sent to an endpoint during the Subscribe action. 
+ Token *string `type:"string" required:"true"` + + // The ARN of the topic for which you wish to confirm a subscription. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmSubscriptionInput) GoString() string { + return s.String() +} + +// Response for ConfirmSubscriptions action. +type ConfirmSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the created subscription. + SubscriptionArn *string `type:"string"` +} + +// String returns the string representation +func (s ConfirmSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmSubscriptionOutput) GoString() string { + return s.String() +} + +// Input for CreatePlatformApplication action. +type CreatePlatformApplicationInput struct { + _ struct{} `type:"structure"` + + // For a list of attributes, see SetPlatformApplicationAttributes (http://docs.aws.amazon.com/sns/latest/api/API_SetPlatformApplicationAttributes.html) + Attributes map[string]*string `type:"map" required:"true"` + + // Application names must be made up of only uppercase and lowercase ASCII letters, + // numbers, underscores, hyphens, and periods, and must be between 1 and 256 + // characters long. + Name *string `type:"string" required:"true"` + + // The following platforms are supported: ADM (Amazon Device Messaging), APNS + // (Apple Push Notification Service), APNS_SANDBOX, and GCM (Google Cloud Messaging). 
+ Platform *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePlatformApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlatformApplicationInput) GoString() string { + return s.String() +} + +// Response from CreatePlatformApplication action. +type CreatePlatformApplicationOutput struct { + _ struct{} `type:"structure"` + + // PlatformApplicationArn is returned. + PlatformApplicationArn *string `type:"string"` +} + +// String returns the string representation +func (s CreatePlatformApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlatformApplicationOutput) GoString() string { + return s.String() +} + +// Input for CreatePlatformEndpoint action. +type CreatePlatformEndpointInput struct { + _ struct{} `type:"structure"` + + // For a list of attributes, see SetEndpointAttributes (http://docs.aws.amazon.com/sns/latest/api/API_SetEndpointAttributes.html). + Attributes map[string]*string `type:"map"` + + // Arbitrary user data to associate with the endpoint. Amazon SNS does not use + // this data. The data must be in UTF-8 format and less than 2KB. + CustomUserData *string `type:"string"` + + // PlatformApplicationArn returned from CreatePlatformApplication is used to + // create an endpoint. + PlatformApplicationArn *string `type:"string" required:"true"` + + // Unique identifier created by the notification service for an app on a device. + // The specific name for Token will vary, depending on which notification service + // is being used. For example, when using APNS as the notification service, + // you need the device token. Alternatively, when using GCM or ADM, the device + // token equivalent is called the registration ID. 
+ Token *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePlatformEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlatformEndpointInput) GoString() string { + return s.String() +} + +// Response from CreateEndpoint action. +type CreatePlatformEndpointOutput struct { + _ struct{} `type:"structure"` + + // EndpointArn returned from CreateEndpoint action. + EndpointArn *string `type:"string"` +} + +// String returns the string representation +func (s CreatePlatformEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlatformEndpointOutput) GoString() string { + return s.String() +} + +// Input for CreateTopic action. +type CreateTopicInput struct { + _ struct{} `type:"structure"` + + // The name of the topic you want to create. + // + // Constraints: Topic names must be made up of only uppercase and lowercase + // ASCII letters, numbers, underscores, and hyphens, and must be between 1 and + // 256 characters long. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTopicInput) GoString() string { + return s.String() +} + +// Response from CreateTopic action. +type CreateTopicOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) assigned to the created topic. + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTopicOutput) GoString() string { + return s.String() +} + +// Input for DeleteEndpoint action. 
+type DeleteEndpointInput struct { + _ struct{} `type:"structure"` + + // EndpointArn of endpoint to delete. + EndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEndpointInput) GoString() string { + return s.String() +} + +type DeleteEndpointOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEndpointOutput) GoString() string { + return s.String() +} + +// Input for DeletePlatformApplication action. +type DeletePlatformApplicationInput struct { + _ struct{} `type:"structure"` + + // PlatformApplicationArn of platform application object to delete. + PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePlatformApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlatformApplicationInput) GoString() string { + return s.String() +} + +type DeletePlatformApplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePlatformApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlatformApplicationOutput) GoString() string { + return s.String() +} + +type DeleteTopicInput struct { + _ struct{} `type:"structure"` + + // The ARN of the topic you want to delete. 
+ TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTopicInput) GoString() string { + return s.String() +} + +type DeleteTopicOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTopicOutput) GoString() string { + return s.String() +} + +// Endpoint for mobile app and device. +type Endpoint struct { + _ struct{} `type:"structure"` + + // Attributes for endpoint. + Attributes map[string]*string `type:"map"` + + // EndpointArn for mobile app and device. + EndpointArn *string `type:"string"` +} + +// String returns the string representation +func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Endpoint) GoString() string { + return s.String() +} + +// Input for GetEndpointAttributes action. +type GetEndpointAttributesInput struct { + _ struct{} `type:"structure"` + + // EndpointArn for GetEndpointAttributes input. + EndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetEndpointAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEndpointAttributesInput) GoString() string { + return s.String() +} + +// Response from GetEndpointAttributes of the EndpointArn. +type GetEndpointAttributesOutput struct { + _ struct{} `type:"structure"` + + // Attributes include the following: + // + // CustomUserData -- arbitrary user data to associate with the endpoint. + // Amazon SNS does not use this data. The data must be in UTF-8 format and less + // than 2KB. 
Enabled -- flag that enables/disables delivery to the endpoint. + // Amazon SNS will set this to false when a notification service indicates to + // Amazon SNS that the endpoint is invalid. Users can set it back to true, typically + // after updating Token. Token -- device token, also referred to as a registration + // id, for an app and mobile device. This is returned from the notification + // service when an app and mobile device are registered with the notification + // service. + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetEndpointAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEndpointAttributesOutput) GoString() string { + return s.String() +} + +// Input for GetPlatformApplicationAttributes action. +type GetPlatformApplicationAttributesInput struct { + _ struct{} `type:"structure"` + + // PlatformApplicationArn for GetPlatformApplicationAttributesInput. + PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPlatformApplicationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPlatformApplicationAttributesInput) GoString() string { + return s.String() +} + +// Response for GetPlatformApplicationAttributes action. +type GetPlatformApplicationAttributesOutput struct { + _ struct{} `type:"structure"` + + // Attributes include the following: + // + // EventEndpointCreated -- Topic ARN to which EndpointCreated event notifications + // should be sent. EventEndpointDeleted -- Topic ARN to which EndpointDeleted + // event notifications should be sent. EventEndpointUpdated -- Topic ARN to + // which EndpointUpdate event notifications should be sent. 
EventDeliveryFailure + // -- Topic ARN to which DeliveryFailure event notifications should be sent + // upon Direct Publish delivery failure (permanent) to one of the application's + // endpoints. + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetPlatformApplicationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPlatformApplicationAttributesOutput) GoString() string { + return s.String() +} + +// Input for GetSubscriptionAttributes. +type GetSubscriptionAttributesInput struct { + _ struct{} `type:"structure"` + + // The ARN of the subscription whose properties you want to get. + SubscriptionArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSubscriptionAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSubscriptionAttributesInput) GoString() string { + return s.String() +} + +// Response for GetSubscriptionAttributes action. +type GetSubscriptionAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of the subscription's attributes. 
Attributes in this map include the + // following: + // + // SubscriptionArn -- the subscription's ARN TopicArn -- the topic ARN that + // the subscription is associated with Owner -- the AWS account ID of the subscription's + // owner ConfirmationWasAuthenticated -- true if the subscription confirmation + // request was authenticated DeliveryPolicy -- the JSON serialization of the + // subscription's delivery policy EffectiveDeliveryPolicy -- the JSON serialization + // of the effective delivery policy that takes into account the topic delivery + // policy and account system defaults + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetSubscriptionAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSubscriptionAttributesOutput) GoString() string { + return s.String() +} + +// Input for GetTopicAttributes action. +type GetTopicAttributesInput struct { + _ struct{} `type:"structure"` + + // The ARN of the topic whose properties you want to get. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTopicAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTopicAttributesInput) GoString() string { + return s.String() +} + +// Response for GetTopicAttributes action. +type GetTopicAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of the topic's attributes. 
Attributes in this map include the following: + // + // TopicArn -- the topic's ARN Owner -- the AWS account ID of the topic's + // owner Policy -- the JSON serialization of the topic's access control policy + // DisplayName -- the human-readable name used in the "From" field for notifications + // to email and email-json endpoints SubscriptionsPending -- the number of + // subscriptions pending confirmation on this topic SubscriptionsConfirmed + // -- the number of confirmed subscriptions on this topic SubscriptionsDeleted + // -- the number of deleted subscriptions on this topic DeliveryPolicy -- the + // JSON serialization of the topic's delivery policy EffectiveDeliveryPolicy + // -- the JSON serialization of the effective delivery policy that takes into + // account system defaults + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetTopicAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTopicAttributesOutput) GoString() string { + return s.String() +} + +// Input for ListEndpointsByPlatformApplication action. +type ListEndpointsByPlatformApplicationInput struct { + _ struct{} `type:"structure"` + + // NextToken string is used when calling ListEndpointsByPlatformApplication + // action to retrieve additional records that are available after the first + // page results. + NextToken *string `type:"string"` + + // PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action. + PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListEndpointsByPlatformApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEndpointsByPlatformApplicationInput) GoString() string { + return s.String() +} + +// Response for ListEndpointsByPlatformApplication action. 
+type ListEndpointsByPlatformApplicationOutput struct { + _ struct{} `type:"structure"` + + // Endpoints returned for ListEndpointsByPlatformApplication action. + Endpoints []*Endpoint `type:"list"` + + // NextToken string is returned when calling ListEndpointsByPlatformApplication + // action if additional records are available after the first page results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListEndpointsByPlatformApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEndpointsByPlatformApplicationOutput) GoString() string { + return s.String() +} + +// Input for ListPlatformApplications action. +type ListPlatformApplicationsInput struct { + _ struct{} `type:"structure"` + + // NextToken string is used when calling ListPlatformApplications action to + // retrieve additional records that are available after the first page results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListPlatformApplicationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPlatformApplicationsInput) GoString() string { + return s.String() +} + +// Response for ListPlatformApplications action. +type ListPlatformApplicationsOutput struct { + _ struct{} `type:"structure"` + + // NextToken string is returned when calling ListPlatformApplications action + // if additional records are available after the first page results. + NextToken *string `type:"string"` + + // Platform applications returned when calling ListPlatformApplications action. 
+ PlatformApplications []*PlatformApplication `type:"list"` +} + +// String returns the string representation +func (s ListPlatformApplicationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPlatformApplicationsOutput) GoString() string { + return s.String() +} + +// Input for ListSubscriptionsByTopic action. +type ListSubscriptionsByTopicInput struct { + _ struct{} `type:"structure"` + + // Token returned by the previous ListSubscriptionsByTopic request. + NextToken *string `type:"string"` + + // The ARN of the topic for which you wish to find subscriptions. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListSubscriptionsByTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsByTopicInput) GoString() string { + return s.String() +} + +// Response for ListSubscriptionsByTopic action. +type ListSubscriptionsByTopicOutput struct { + _ struct{} `type:"structure"` + + // Token to pass along to the next ListSubscriptionsByTopic request. This element + // is returned if there are more subscriptions to retrieve. + NextToken *string `type:"string"` + + // A list of subscriptions. + Subscriptions []*Subscription `type:"list"` +} + +// String returns the string representation +func (s ListSubscriptionsByTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsByTopicOutput) GoString() string { + return s.String() +} + +// Input for ListSubscriptions action. +type ListSubscriptionsInput struct { + _ struct{} `type:"structure"` + + // Token returned by the previous ListSubscriptions request. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListSubscriptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsInput) GoString() string { + return s.String() +} + +// Response for ListSubscriptions action +type ListSubscriptionsOutput struct { + _ struct{} `type:"structure"` + + // Token to pass along to the next ListSubscriptions request. This element is + // returned if there are more subscriptions to retrieve. + NextToken *string `type:"string"` + + // A list of subscriptions. + Subscriptions []*Subscription `type:"list"` +} + +// String returns the string representation +func (s ListSubscriptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsOutput) GoString() string { + return s.String() +} + +type ListTopicsInput struct { + _ struct{} `type:"structure"` + + // Token returned by the previous ListTopics request. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListTopicsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTopicsInput) GoString() string { + return s.String() +} + +// Response for ListTopics action. +type ListTopicsOutput struct { + _ struct{} `type:"structure"` + + // Token to pass along to the next ListTopics request. This element is returned + // if there are additional topics to retrieve. + NextToken *string `type:"string"` + + // A list of topic ARNs. + Topics []*Topic `type:"list"` +} + +// String returns the string representation +func (s ListTopicsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTopicsOutput) GoString() string { + return s.String() +} + +// The user-specified message attribute value. 
For string data types, the value +// attribute has the same restrictions on the content as the message body. For +// more information, see Publish (http://docs.aws.amazon.com/sns/latest/api/API_Publish.html). +// +// Name, type, and value must not be empty or null. In addition, the message +// body should not be empty or null. All parts of the message attribute, including +// name, type, and value, are included in the message size restriction, which +// is currently 256 KB (262,144 bytes). For more information, see Using Amazon +// SNS Message Attributes (http://docs.aws.amazon.com/sns/latest/dg/SNSMessageAttributes.html). +type MessageAttributeValue struct { + _ struct{} `type:"structure"` + + // Binary type attributes can store any binary data, for example, compressed + // data, encrypted data, or images. + // + // BinaryValue is automatically base64 encoded/decoded by the SDK. + BinaryValue []byte `type:"blob"` + + // Amazon SNS supports the following logical data types: String, Number, and + // Binary. For more information, see Message Attribute Data Types (http://docs.aws.amazon.com/sns/latest/dg/SNSMessageAttributes.html#SNSMessageAttributes.DataTypes). + DataType *string `type:"string" required:"true"` + + // Strings are Unicode with UTF8 binary encoding. For a list of code values, + // see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + StringValue *string `type:"string"` +} + +// String returns the string representation +func (s MessageAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MessageAttributeValue) GoString() string { + return s.String() +} + +// Platform application object. +type PlatformApplication struct { + _ struct{} `type:"structure"` + + // Attributes for platform application object. 
+ Attributes map[string]*string `type:"map"` + + // PlatformApplicationArn for platform application object. + PlatformApplicationArn *string `type:"string"` +} + +// String returns the string representation +func (s PlatformApplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlatformApplication) GoString() string { + return s.String() +} + +// Input for Publish action. +type PublishInput struct { + _ struct{} `type:"structure"` + + // The message you want to send to the topic. + // + // If you want to send the same message to all transport protocols, include + // the text of the message as a String value. + // + // If you want to send different messages for each transport protocol, set + // the value of the MessageStructure parameter to json and use a JSON object + // for the Message parameter. See the Examples section for the format of the + // JSON object. + // + // Constraints: Messages must be UTF-8 encoded strings at most 256 KB in size + // (262144 bytes, not 262144 characters). + // + // JSON-specific constraints: Keys in the JSON object that correspond to supported + // transport protocols must have simple JSON string values. The values will + // be parsed (unescaped) before they are used in outgoing messages. Outbound + // notifications are JSON encoded (meaning that the characters will be reescaped + // for sending). Values have a minimum length of 0 (the empty string, "", is + // allowed). Values have a maximum length bounded by the overall message size + // (so, including multiple protocols may limit message sizes). Non-string values + // will cause the key to be ignored. Keys that do not correspond to supported + // transport protocols are ignored. Duplicate keys are not allowed. Failure + // to parse or validate any key or value in the message will cause the Publish + // call to return an error (no partial delivery). 
+ Message *string `type:"string" required:"true"` + + // Message attributes for Publish action. + MessageAttributes map[string]*MessageAttributeValue `locationNameKey:"Name" locationNameValue:"Value" type:"map"` + + // Set MessageStructure to json if you want to send a different message for + // each protocol. For example, using one publish action, you can send a short + // message to your SMS subscribers and a longer message to your email subscribers. + // If you set MessageStructure to json, the value of the Message parameter must: + // + // be a syntactically valid JSON object; and contain at least a top-level + // JSON key of "default" with a value that is a string. You can define other + // top-level keys that define the message you want to send to a specific transport + // protocol (e.g., "http"). + // + // For information about sending different messages for each protocol using + // the AWS Management Console, go to Create Different Messages for Each Protocol + // (http://docs.aws.amazon.com/sns/latest/gsg/Publish.html#sns-message-formatting-by-protocol) + // in the Amazon Simple Notification Service Getting Started Guide. + // + // Valid value: json + MessageStructure *string `type:"string"` + + // Optional parameter to be used as the "Subject" line when the message is delivered + // to email endpoints. This field will also be included, if present, in the + // standard JSON messages delivered to other endpoints. + // + // Constraints: Subjects must be ASCII text that begins with a letter, number, + // or punctuation mark; must not include line breaks or control characters; + // and must be less than 100 characters long. + Subject *string `type:"string"` + + // Either TopicArn or EndpointArn, but not both. + TargetArn *string `type:"string"` + + // The topic you want to publish to. 
+ TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s PublishInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishInput) GoString() string { + return s.String() +} + +// Response for Publish action. +type PublishOutput struct { + _ struct{} `type:"structure"` + + // Unique identifier assigned to the published message. + // + // Length Constraint: Maximum 100 characters + MessageId *string `type:"string"` +} + +// String returns the string representation +func (s PublishOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishOutput) GoString() string { + return s.String() +} + +// Input for RemovePermission action. +type RemovePermissionInput struct { + _ struct{} `type:"structure"` + + // The unique label of the statement you want to remove. + Label *string `type:"string" required:"true"` + + // The ARN of the topic whose access control policy you wish to modify. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemovePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionInput) GoString() string { + return s.String() +} + +type RemovePermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemovePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionOutput) GoString() string { + return s.String() +} + +// Input for SetEndpointAttributes action. +type SetEndpointAttributesInput struct { + _ struct{} `type:"structure"` + + // A map of the endpoint attributes. Attributes in this map include the following: + // + // CustomUserData -- arbitrary user data to associate with the endpoint. 
+ // Amazon SNS does not use this data. The data must be in UTF-8 format and less + // than 2KB. Enabled -- flag that enables/disables delivery to the endpoint. + // Amazon SNS will set this to false when a notification service indicates to + // Amazon SNS that the endpoint is invalid. Users can set it back to true, typically + // after updating Token. Token -- device token, also referred to as a registration + // id, for an app and mobile device. This is returned from the notification + // service when an app and mobile device are registered with the notification + // service. + Attributes map[string]*string `type:"map" required:"true"` + + // EndpointArn used for SetEndpointAttributes action. + EndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetEndpointAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetEndpointAttributesInput) GoString() string { + return s.String() +} + +type SetEndpointAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetEndpointAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetEndpointAttributesOutput) GoString() string { + return s.String() +} + +// Input for SetPlatformApplicationAttributes action. +type SetPlatformApplicationAttributesInput struct { + _ struct{} `type:"structure"` + + // A map of the platform application attributes. Attributes in this map include + // the following: + // + // PlatformCredential -- The credential received from the notification service. + // For APNS/APNS_SANDBOX, PlatformCredential is "private key". For GCM, PlatformCredential + // is "API key". For ADM, PlatformCredential is "client secret". PlatformPrincipal + // -- The principal received from the notification service. 
For APNS/APNS_SANDBOX, + // PlatformPrincipal is "SSL certificate". For GCM, PlatformPrincipal is not + // applicable. For ADM, PlatformPrincipal is "client id". EventEndpointCreated + // -- Topic ARN to which EndpointCreated event notifications should be sent. + // EventEndpointDeleted -- Topic ARN to which EndpointDeleted event notifications + // should be sent. EventEndpointUpdated -- Topic ARN to which EndpointUpdate + // event notifications should be sent. EventDeliveryFailure -- Topic ARN to + // which DeliveryFailure event notifications should be sent upon Direct Publish + // delivery failure (permanent) to one of the application's endpoints. + Attributes map[string]*string `type:"map" required:"true"` + + // PlatformApplicationArn for SetPlatformApplicationAttributes action. + PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetPlatformApplicationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetPlatformApplicationAttributesInput) GoString() string { + return s.String() +} + +type SetPlatformApplicationAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetPlatformApplicationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetPlatformApplicationAttributesOutput) GoString() string { + return s.String() +} + +// Input for SetSubscriptionAttributes action. +type SetSubscriptionAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute you want to set. Only a subset of the subscriptions + // attributes are mutable. + // + // Valid values: DeliveryPolicy | RawMessageDelivery + AttributeName *string `type:"string" required:"true"` + + // The new value for the attribute in JSON format. 
+ AttributeValue *string `type:"string"` + + // The ARN of the subscription to modify. + SubscriptionArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetSubscriptionAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetSubscriptionAttributesInput) GoString() string { + return s.String() +} + +type SetSubscriptionAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetSubscriptionAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetSubscriptionAttributesOutput) GoString() string { + return s.String() +} + +// Input for SetTopicAttributes action. +type SetTopicAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute you want to set. Only a subset of the topic's attributes + // are mutable. + // + // Valid values: Policy | DisplayName | DeliveryPolicy + AttributeName *string `type:"string" required:"true"` + + // The new value for the attribute. + AttributeValue *string `type:"string"` + + // The ARN of the topic to modify. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetTopicAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTopicAttributesInput) GoString() string { + return s.String() +} + +type SetTopicAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetTopicAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTopicAttributesOutput) GoString() string { + return s.String() +} + +// Input for Subscribe action. 
+type SubscribeInput struct { + _ struct{} `type:"structure"` + + // The endpoint that you want to receive notifications. Endpoints vary by protocol: + // + // For the http protocol, the endpoint is an URL beginning with "http://" + // For the https protocol, the endpoint is a URL beginning with "https://" For + // the email protocol, the endpoint is an email address For the email-json protocol, + // the endpoint is an email address For the sms protocol, the endpoint is a + // phone number of an SMS-enabled device For the sqs protocol, the endpoint + // is the ARN of an Amazon SQS queue For the application protocol, the endpoint + // is the EndpointArn of a mobile app and device. + Endpoint *string `type:"string"` + + // The protocol you want to use. Supported protocols include: + // + // http -- delivery of JSON-encoded message via HTTP POST https -- delivery + // of JSON-encoded message via HTTPS POST email -- delivery of message via + // SMTP email-json -- delivery of JSON-encoded message via SMTP sms -- delivery + // of message via SMS sqs -- delivery of JSON-encoded message to an Amazon + // SQS queue application -- delivery of JSON-encoded message to an EndpointArn + // for a mobile app and device. + Protocol *string `type:"string" required:"true"` + + // The ARN of the topic you want to subscribe to. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SubscribeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubscribeInput) GoString() string { + return s.String() +} + +// Response for Subscribe action. +type SubscribeOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the subscription, if the service was able to create a subscription + // immediately (without requiring endpoint owner confirmation). 
+ SubscriptionArn *string `type:"string"` +} + +// String returns the string representation +func (s SubscribeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubscribeOutput) GoString() string { + return s.String() +} + +// A wrapper type for the attributes of an Amazon SNS subscription. +type Subscription struct { + _ struct{} `type:"structure"` + + // The subscription's endpoint (format depends on the protocol). + Endpoint *string `type:"string"` + + // The subscription's owner. + Owner *string `type:"string"` + + // The subscription's protocol. + Protocol *string `type:"string"` + + // The subscription's ARN. + SubscriptionArn *string `type:"string"` + + // The ARN of the subscription's topic. + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s Subscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subscription) GoString() string { + return s.String() +} + +// A wrapper type for the topic's Amazon Resource Name (ARN). To retrieve a +// topic's attributes, use GetTopicAttributes. +type Topic struct { + _ struct{} `type:"structure"` + + // The topic's ARN. + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s Topic) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Topic) GoString() string { + return s.String() +} + +// Input for Unsubscribe action. +type UnsubscribeInput struct { + _ struct{} `type:"structure"` + + // The ARN of the subscription to be deleted. 
+ SubscriptionArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UnsubscribeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsubscribeInput) GoString() string { + return s.String() +} + +type UnsubscribeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnsubscribeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsubscribeOutput) GoString() string { + return s.String() +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sns/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sns/service.go new file mode 100644 index 00000000..7db07936 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sns/service.go @@ -0,0 +1,98 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package sns + +import ( + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Simple Notification Service (Amazon SNS) is a web service that enables +// you to build distributed web-enabled applications. Applications can use Amazon +// SNS to easily push real-time notification messages to interested subscribers +// over multiple delivery protocols. 
For more information about this product +// see http://aws.amazon.com/sns (http://aws.amazon.com/sns/). For detailed +// information about Amazon SNS features and their associated API calls, see +// the Amazon SNS Developer Guide (http://docs.aws.amazon.com/sns/latest/dg/). +// +// We also provide SDKs that enable you to access Amazon SNS from your preferred +// programming language. The SDKs contain functionality that automatically takes +// care of tasks such as: cryptographically signing your service requests, retrying +// requests, and handling error responses. For a list of available SDKs, go +// to Tools for Amazon Web Services (http://aws.amazon.com/tools/). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type SNS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "sns" + +// New creates a new instance of the SNS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a SNS client from just a session. +// svc := sns.New(mySession) +// +// // Create a SNS client with additional configuration +// svc := sns.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SNS { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SNS { + svc := &SNS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-03-31", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SNS operation and runs any +// custom request initialization. +func (c *SNS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/LICENSE.txt b/Godeps/_workspace/src/github.com/cihub/seelog/LICENSE.txt deleted file mode 100644 index 8c706814..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/LICENSE.txt +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2012, Cloud Instruments Co., Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the Cloud Instruments Co., Ltd. 
nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/README.markdown b/Godeps/_workspace/src/github.com/cihub/seelog/README.markdown deleted file mode 100644 index 7dd1ab35..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/README.markdown +++ /dev/null @@ -1,116 +0,0 @@ -Seelog -======= - -Seelog is a powerful and easy-to-learn logging framework that provides functionality for flexible dispatching, filtering, and formatting log messages. -It is natively written in the [Go](http://golang.org/) programming language. 
- -[![Build Status](https://drone.io/github.com/cihub/seelog/status.png)](https://drone.io/github.com/cihub/seelog/latest) - -Features ------------------- - -* Xml configuring to be able to change logger parameters without recompilation -* Changing configurations on the fly without app restart -* Possibility to set different log configurations for different project files and functions -* Adjustable message formatting -* Simultaneous log output to multiple streams -* Choosing logger priority strategy to minimize performance hit -* Different output writers - * Console writer - * File writer - * Buffered writer (Chunk writer) - * Rolling log writer (Logging with rotation) - * SMTP writer - * Others... (See [Wiki](https://github.com/cihub/seelog/wiki)) -* Log message wrappers (JSON, XML, etc.) -* Global variables and functions for easy usage in standalone apps -* Functions for flexible usage in libraries - -Quick-start ------------ - -```go -package main - -import log "github.com/cihub/seelog" - -func main() { - defer log.Flush() - log.Info("Hello from Seelog!") -} -``` - -Installation ------------- - -If you don't have the Go development environment installed, visit the -[Getting Started](http://golang.org/doc/install.html) document and follow the instructions. 
Once you're ready, execute the following command: - -``` -go get -u github.com/cihub/seelog -``` - -*IMPORTANT*: If you are not using the latest release version of Go, check out this [wiki page](https://github.com/cihub/seelog/wiki/Notes-on-'go-get') - -Documentation ---------------- - -Seelog has github wiki pages, which contain detailed how-tos references: https://github.com/cihub/seelog/wiki - -Examples ---------------- - -Seelog examples can be found here: [seelog-examples](https://github.com/cihub/seelog-examples) - -Issues ---------------- - -Feel free to push issues that could make Seelog better: https://github.com/cihub/seelog/issues - -Changelog ---------------- -* **v2.6** : Config using code and custom formatters - * Configuration using code in addition to xml (All internal receiver/dispatcher/logger types are now exported). - * Custom formatters. Check [wiki](https://github.com/cihub/seelog/wiki/Custom-formatters) - * Bugfixes and internal improvements. -* **v2.5** : Interaction with other systems. Part 2: custom receivers - * Finished custom receivers feature. Check [wiki](https://github.com/cihub/seelog/wiki/custom-receivers) - * Added 'LoggerFromCustomReceiver' - * Added 'LoggerFromWriterWithMinLevelAndFormat' - * Added 'LoggerFromCustomReceiver' - * Added 'LoggerFromParamConfigAs...' -* **v2.4** : Interaction with other systems. Part 1: wrapping seelog - * Added configurable caller stack skip logic - * Added 'SetAdditionalStackDepth' to 'LoggerInterface' -* **v2.3** : Rethinking 'rolling' receiver - * Reimplemented 'rolling' receiver - * Added 'Max rolls' feature for 'rolling' receiver with type='date' - * Fixed 'rolling' receiver issue: renaming on Windows -* **v2.2** : go1.0 compatibility point [go1.0 tag] - * Fixed internal bugs - * Added 'ANSI n [;k]' format identifier: %EscN - * Made current release go1 compatible -* **v2.1** : Some new features - * Rolling receiver archiving option. 
- * Added format identifier: %Line - * Smtp: added paths to PEM files directories - * Added format identifier: %FuncShort - * Warn, Error and Critical methods now return an error -* **v2.0** : Second major release. BREAKING CHANGES. - * Support of binaries with stripped symbols - * Added log strategy: adaptive - * Critical message now forces Flush() - * Added predefined formats: xml-debug, xml-debug-short, xml, xml-short, json-debug, json-debug-short, json, json-short, debug, debug-short, fast - * Added receiver: conn (network connection writer) - * BREAKING CHANGE: added Tracef, Debugf, Infof, etc. to satisfy the print/printf principle - * Bug fixes -* **v1.0** : Initial release. Features: - * Xml config - * Changing configurations on the fly without app restart - * Contraints and exceptions - * Formatting - * Log strategies: sync, async loop, async timer - * Receivers: buffered, console, file, rolling, smtp - - - diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_adaptive_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/behavior_adaptive_test.go deleted file mode 100644 index e9919493..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_adaptive_test.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "strconv" - "testing" -) - -func countSequencedRowsInFile(filePath string) (int64, error) { - bts, err := ioutil.ReadFile(filePath) - if err != nil { - return 0, err - } - - bufReader := bufio.NewReader(bytes.NewBuffer(bts)) - - var gotCounter int64 - for { - line, _, bufErr := bufReader.ReadLine() - if bufErr != nil && bufErr != io.EOF { - return 0, bufErr - } - - lineString := string(line) - if lineString == "" { - break - } - - intVal, atoiErr := strconv.ParseInt(lineString, 10, 64) - if atoiErr != nil { - return 0, atoiErr - } - - if intVal != gotCounter { - return 0, fmt.Errorf("wrong order: %d Expected: %d\n", intVal, gotCounter) - } - - gotCounter++ - } - - return gotCounter, nil -} - -func Test_Adaptive(t *testing.T) { - fileName := "beh_test_adaptive.log" - count := 100 - - Current.Close() - - if e := tryRemoveFile(fileName); e != nil { - t.Error(e) - return - } - defer func() { - if e := tryRemoveFile(fileName); e != nil { - t.Error(e) - } - }() - - testConfig := ` - - - - - - - -` - - logger, _ := LoggerFromConfigAsString(testConfig) - - err := ReplaceLogger(logger) - if err != nil { - 
t.Error(err) - return - } - - for i := 0; i < count; i++ { - Trace(strconv.Itoa(i)) - } - - Flush() - - gotCount, err := countSequencedRowsInFile(fileName) - if err != nil { - t.Error(err) - return - } - - if int64(count) != gotCount { - t.Errorf("wrong count of log messages. Expected: %v, got: %v.", count, gotCount) - return - } - - Current.Close() -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_adaptivelogger.go b/Godeps/_workspace/src/github.com/cihub/seelog/behavior_adaptivelogger.go deleted file mode 100644 index 0c640cae..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_adaptivelogger.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "fmt" - "math" - "time" -) - -var ( - adaptiveLoggerMaxInterval = time.Minute - adaptiveLoggerMaxCriticalMsgCount = uint32(1000) -) - -// asyncAdaptiveLogger represents asynchronous adaptive logger which acts like -// an async timer logger, but its interval depends on the current message count -// in the queue. -// -// Interval = I, minInterval = m, maxInterval = M, criticalMsgCount = C, msgCount = c: -// I = m + (C - Min(c, C)) / C * (M - m) -type asyncAdaptiveLogger struct { - asyncLogger - minInterval time.Duration - criticalMsgCount uint32 - maxInterval time.Duration -} - -// NewAsyncLoopLogger creates a new asynchronous adaptive logger -func NewAsyncAdaptiveLogger( - config *logConfig, - minInterval time.Duration, - maxInterval time.Duration, - criticalMsgCount uint32) (*asyncAdaptiveLogger, error) { - - if minInterval <= 0 { - return nil, errors.New("async adaptive logger min interval should be > 0") - } - - if maxInterval > adaptiveLoggerMaxInterval { - return nil, fmt.Errorf("async adaptive logger max interval should be <= %s", - adaptiveLoggerMaxInterval) - } - - if criticalMsgCount <= 0 { - return nil, errors.New("async adaptive logger critical msg count should be > 0") - } - - if criticalMsgCount > adaptiveLoggerMaxCriticalMsgCount { - return nil, fmt.Errorf("async adaptive logger critical msg count should be <= %s", - adaptiveLoggerMaxInterval) - } - - asnAdaptiveLogger := 
new(asyncAdaptiveLogger) - - asnAdaptiveLogger.asyncLogger = *newAsyncLogger(config) - asnAdaptiveLogger.minInterval = minInterval - asnAdaptiveLogger.maxInterval = maxInterval - asnAdaptiveLogger.criticalMsgCount = criticalMsgCount - - go asnAdaptiveLogger.processQueue() - - return asnAdaptiveLogger, nil -} - -func (asnAdaptiveLogger *asyncAdaptiveLogger) processItem() (closed bool, itemCount int) { - asnAdaptiveLogger.queueHasElements.L.Lock() - defer asnAdaptiveLogger.queueHasElements.L.Unlock() - - for asnAdaptiveLogger.msgQueue.Len() == 0 && !asnAdaptiveLogger.Closed() { - asnAdaptiveLogger.queueHasElements.Wait() - } - - if asnAdaptiveLogger.Closed() { - return true, asnAdaptiveLogger.msgQueue.Len() - } - - asnAdaptiveLogger.processQueueElement() - return false, asnAdaptiveLogger.msgQueue.Len() - 1 -} - -// I = m + (C - Min(c, C)) / C * (M - m) => -// I = m + cDiff * mDiff, -// cDiff = (C - Min(c, C)) / C) -// mDiff = (M - m) -func (asnAdaptiveLogger *asyncAdaptiveLogger) calcAdaptiveInterval(msgCount int) time.Duration { - critCountF := float64(asnAdaptiveLogger.criticalMsgCount) - cDiff := (critCountF - math.Min(float64(msgCount), critCountF)) / critCountF - mDiff := float64(asnAdaptiveLogger.maxInterval - asnAdaptiveLogger.minInterval) - - return asnAdaptiveLogger.minInterval + time.Duration(cDiff*mDiff) -} - -func (asnAdaptiveLogger *asyncAdaptiveLogger) processQueue() { - for !asnAdaptiveLogger.Closed() { - closed, itemCount := asnAdaptiveLogger.processItem() - - if closed { - break - } - - interval := asnAdaptiveLogger.calcAdaptiveInterval(itemCount) - - <-time.After(interval) - } -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asynclogger.go b/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asynclogger.go deleted file mode 100644 index 75231067..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asynclogger.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. 
-// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "container/list" - "fmt" - "sync" -) - -// MaxQueueSize is the critical number of messages in the queue that result in an immediate flush. 
-const ( - MaxQueueSize = 10000 -) - -type msgQueueItem struct { - level LogLevel - context LogContextInterface - message fmt.Stringer -} - -// asyncLogger represents common data for all asynchronous loggers -type asyncLogger struct { - commonLogger - msgQueue *list.List - queueHasElements *sync.Cond -} - -// newAsyncLogger creates a new asynchronous logger -func newAsyncLogger(config *logConfig) *asyncLogger { - asnLogger := new(asyncLogger) - - asnLogger.msgQueue = list.New() - asnLogger.queueHasElements = sync.NewCond(new(sync.Mutex)) - - asnLogger.commonLogger = *newCommonLogger(config, asnLogger) - - return asnLogger -} - -func (asnLogger *asyncLogger) innerLog( - level LogLevel, - context LogContextInterface, - message fmt.Stringer) { - - asnLogger.addMsgToQueue(level, context, message) -} - -func (asnLogger *asyncLogger) Close() { - asnLogger.m.Lock() - defer asnLogger.m.Unlock() - - if !asnLogger.Closed() { - asnLogger.flushQueue(true) - asnLogger.config.RootDispatcher.Flush() - - if err := asnLogger.config.RootDispatcher.Close(); err != nil { - reportInternalError(err) - } - - asnLogger.closedM.Lock() - asnLogger.closed = true - asnLogger.closedM.Unlock() - asnLogger.queueHasElements.Broadcast() - } -} - -func (asnLogger *asyncLogger) Flush() { - asnLogger.m.Lock() - defer asnLogger.m.Unlock() - - if !asnLogger.Closed() { - asnLogger.flushQueue(true) - asnLogger.config.RootDispatcher.Flush() - } -} - -func (asnLogger *asyncLogger) flushQueue(lockNeeded bool) { - if lockNeeded { - asnLogger.queueHasElements.L.Lock() - defer asnLogger.queueHasElements.L.Unlock() - } - - for asnLogger.msgQueue.Len() > 0 { - asnLogger.processQueueElement() - } -} - -func (asnLogger *asyncLogger) processQueueElement() { - if asnLogger.msgQueue.Len() > 0 { - backElement := asnLogger.msgQueue.Front() - msg, _ := backElement.Value.(msgQueueItem) - asnLogger.processLogMsg(msg.level, msg.message, msg.context) - asnLogger.msgQueue.Remove(backElement) - } -} - -func (asnLogger 
*asyncLogger) addMsgToQueue( - level LogLevel, - context LogContextInterface, - message fmt.Stringer) { - - if !asnLogger.Closed() { - asnLogger.queueHasElements.L.Lock() - defer asnLogger.queueHasElements.L.Unlock() - - if asnLogger.msgQueue.Len() >= MaxQueueSize { - fmt.Printf("Seelog queue overflow: more than %v messages in the queue. Flushing.\n", MaxQueueSize) - asnLogger.flushQueue(false) - } - - queueItem := msgQueueItem{level, context, message} - - asnLogger.msgQueue.PushBack(queueItem) - asnLogger.queueHasElements.Broadcast() - } else { - err := fmt.Errorf("queue closed! Cannot process element: %d %#v", level, message) - reportInternalError(err) - } -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asyncloop_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asyncloop_test.go deleted file mode 100644 index 142c4fcf..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asyncloop_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "strconv" - "testing" -) - -func Test_Asyncloop(t *testing.T) { - fileName := "beh_test_asyncloop.log" - count := 100 - - Current.Close() - - if e := tryRemoveFile(fileName); e != nil { - t.Error(e) - return - } - defer func() { - if e := tryRemoveFile(fileName); e != nil { - t.Error(e) - } - }() - - testConfig := ` - - - - - - - -` - - logger, _ := LoggerFromConfigAsString(testConfig) - err := ReplaceLogger(logger) - if err != nil { - t.Error(err) - return - } - - for i := 0; i < count; i++ { - Trace(strconv.Itoa(i)) - } - - Flush() - - gotCount, err := countSequencedRowsInFile(fileName) - if err != nil { - t.Error(err) - return - } - - if int64(count) != gotCount { - t.Errorf("wrong count of log messages. 
Expected: %v, got: %v.", count, gotCount) - return - } - - Current.Close() -} - -func Test_AsyncloopOff(t *testing.T) { - fileName := "beh_test_asyncloopoff.log" - count := 100 - - Current.Close() - - if e := tryRemoveFile(fileName); e != nil { - t.Error(e) - return - } - - testConfig := ` - - - - - - - -` - - logger, _ := LoggerFromConfigAsString(testConfig) - err := ReplaceLogger(logger) - if err != nil { - t.Error(err) - return - } - - for i := 0; i < count; i++ { - Trace(strconv.Itoa(i)) - } - - Flush() - - ex, err := fileExists(fileName) - if err != nil { - t.Error(err) - } - if ex { - t.Errorf("logger at level OFF is not expected to create log file at all.") - defer func() { - if e := tryRemoveFile(fileName); e != nil { - t.Error(e) - } - }() - } - - Current.Close() -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asynclooplogger.go b/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asynclooplogger.go deleted file mode 100644 index 972467b3..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asynclooplogger.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -// asyncLoopLogger represents asynchronous logger which processes the log queue in -// a 'for' loop -type asyncLoopLogger struct { - asyncLogger -} - -// NewAsyncLoopLogger creates a new asynchronous loop logger -func NewAsyncLoopLogger(config *logConfig) *asyncLoopLogger { - - asnLoopLogger := new(asyncLoopLogger) - - asnLoopLogger.asyncLogger = *newAsyncLogger(config) - - go asnLoopLogger.processQueue() - - return asnLoopLogger -} - -func (asnLoopLogger *asyncLoopLogger) processItem() (closed bool) { - asnLoopLogger.queueHasElements.L.Lock() - defer asnLoopLogger.queueHasElements.L.Unlock() - - for asnLoopLogger.msgQueue.Len() == 0 && !asnLoopLogger.Closed() { - asnLoopLogger.queueHasElements.Wait() - } - - if asnLoopLogger.Closed() { - return true - } - - asnLoopLogger.processQueueElement() - return false -} - -func (asnLoopLogger *asyncLoopLogger) processQueue() { - for !asnLoopLogger.Closed() { - closed := asnLoopLogger.processItem() - - if closed { - break - } - } -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asynctimer_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asynctimer_test.go deleted file mode 100644 index 37bfa6a9..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asynctimer_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "strconv" - "testing" -) - -func Test_Asynctimer(t *testing.T) { - fileName := "beh_test_asynctimer.log" - count := 100 - - Current.Close() - - if e := tryRemoveFile(fileName); e != nil { - t.Error(e) - return - } - defer func() { - if e := tryRemoveFile(fileName); e != nil { - t.Error(e) - } - }() - - testConfig := ` - - - - - - - -` - - logger, _ := LoggerFromConfigAsString(testConfig) - err := ReplaceLogger(logger) - if err != nil { - t.Error(err) - return - } - - for i := 0; i < count; i++ { - Trace(strconv.Itoa(i)) - } - - Flush() - - gotCount, err := countSequencedRowsInFile(fileName) - if err != nil { - t.Error(err) - return - } - - if int64(count) != gotCount { - t.Errorf("wrong count of log messages. Expected: %v, got: %v.", count, gotCount) - return - } - - Current.Close() -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asynctimerlogger.go b/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asynctimerlogger.go deleted file mode 100644 index 8118f205..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_asynctimerlogger.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "time" -) - -// asyncTimerLogger represents asynchronous logger which processes the log queue each -// 'duration' nanoseconds -type asyncTimerLogger struct { - asyncLogger - interval time.Duration -} - -// NewAsyncLoopLogger creates a new asynchronous loop logger -func NewAsyncTimerLogger(config *logConfig, interval time.Duration) (*asyncTimerLogger, error) { - - if interval <= 0 { - return nil, errors.New("async logger interval should be > 0") - } - - asnTimerLogger := new(asyncTimerLogger) - - asnTimerLogger.asyncLogger = *newAsyncLogger(config) - asnTimerLogger.interval = interval - - go asnTimerLogger.processQueue() - - return asnTimerLogger, nil -} - -func (asnTimerLogger *asyncTimerLogger) processItem() (closed bool) { - asnTimerLogger.queueHasElements.L.Lock() - defer asnTimerLogger.queueHasElements.L.Unlock() - - for asnTimerLogger.msgQueue.Len() == 0 && !asnTimerLogger.Closed() { - asnTimerLogger.queueHasElements.Wait() - } - - if asnTimerLogger.Closed() { - return true - } - - asnTimerLogger.processQueueElement() - return false -} - -func (asnTimerLogger *asyncTimerLogger) processQueue() { - for !asnTimerLogger.Closed() { - 
closed := asnTimerLogger.processItem() - - if closed { - break - } - - <-time.After(asnTimerLogger.interval) - } -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_synclogger.go b/Godeps/_workspace/src/github.com/cihub/seelog/behavior_synclogger.go deleted file mode 100644 index 5a022ebc..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_synclogger.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "fmt" -) - -// syncLogger performs logging in the same goroutine where 'Trace/Debug/...' 
-// func was called -type syncLogger struct { - commonLogger -} - -// NewSyncLogger creates a new synchronous logger -func NewSyncLogger(config *logConfig) *syncLogger { - syncLogger := new(syncLogger) - - syncLogger.commonLogger = *newCommonLogger(config, syncLogger) - - return syncLogger -} - -func (syncLogger *syncLogger) innerLog( - level LogLevel, - context LogContextInterface, - message fmt.Stringer) { - - syncLogger.processLogMsg(level, message, context) -} - -func (syncLogger *syncLogger) Close() { - syncLogger.m.Lock() - defer syncLogger.m.Unlock() - - if !syncLogger.Closed() { - if err := syncLogger.config.RootDispatcher.Close(); err != nil { - reportInternalError(err) - } - syncLogger.closedM.Lock() - syncLogger.closed = true - syncLogger.closedM.Unlock() - } -} - -func (syncLogger *syncLogger) Flush() { - syncLogger.m.Lock() - defer syncLogger.m.Unlock() - - if !syncLogger.Closed() { - syncLogger.config.RootDispatcher.Flush() - } -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_synclogger_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/behavior_synclogger_test.go deleted file mode 100644 index ddcbbb60..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/behavior_synclogger_test.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "strconv" - "testing" -) - -func Test_Sync(t *testing.T) { - fileName := "beh_test_sync.log" - count := 100 - - Current.Close() - - if e := tryRemoveFile(fileName); e != nil { - t.Error(e) - return - } - defer func() { - if e := tryRemoveFile(fileName); e != nil { - t.Error(e) - } - }() - - testConfig := ` - - - - - - - -` - - logger, _ := LoggerFromConfigAsString(testConfig) - err := ReplaceLogger(logger) - if err != nil { - t.Error(err) - return - } - - for i := 0; i < count; i++ { - Trace(strconv.Itoa(i)) - } - - gotCount, err := countSequencedRowsInFile(fileName) - if err != nil { - t.Error(err) - return - } - - if int64(count) != gotCount { - t.Errorf("wrong count of log messages. Expected: %v, got: %v.", count, gotCount) - return - } - - Current.Close() -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/cfg_config.go b/Godeps/_workspace/src/github.com/cihub/seelog/cfg_config.go deleted file mode 100644 index c7d84812..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/cfg_config.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "bytes" - "encoding/xml" - "io" - "os" -) - -// LoggerFromConfigAsFile creates logger with config from file. File should contain valid seelog xml. -func LoggerFromConfigAsFile(fileName string) (LoggerInterface, error) { - file, err := os.Open(fileName) - if err != nil { - return nil, err - } - defer file.Close() - - conf, err := configFromReader(file) - if err != nil { - return nil, err - } - - return createLoggerFromFullConfig(conf) -} - -// LoggerFromConfigAsBytes creates a logger with config from bytes stream. Bytes should contain valid seelog xml. 
-func LoggerFromConfigAsBytes(data []byte) (LoggerInterface, error) { - conf, err := configFromReader(bytes.NewBuffer(data)) - if err != nil { - return nil, err - } - - return createLoggerFromFullConfig(conf) -} - -// LoggerFromConfigAsString creates a logger with config from a string. String should contain valid seelog xml. -func LoggerFromConfigAsString(data string) (LoggerInterface, error) { - return LoggerFromConfigAsBytes([]byte(data)) -} - -// LoggerFromParamConfigAsFile does the same as LoggerFromConfigAsFile, but includes special parser options. -// See 'CfgParseParams' comments. -func LoggerFromParamConfigAsFile(fileName string, parserParams *CfgParseParams) (LoggerInterface, error) { - file, err := os.Open(fileName) - if err != nil { - return nil, err - } - defer file.Close() - - conf, err := configFromReaderWithConfig(file, parserParams) - if err != nil { - return nil, err - } - - return createLoggerFromFullConfig(conf) -} - -// LoggerFromParamConfigAsBytes does the same as LoggerFromConfigAsBytes, but includes special parser options. -// See 'CfgParseParams' comments. -func LoggerFromParamConfigAsBytes(data []byte, parserParams *CfgParseParams) (LoggerInterface, error) { - conf, err := configFromReaderWithConfig(bytes.NewBuffer(data), parserParams) - if err != nil { - return nil, err - } - - return createLoggerFromFullConfig(conf) -} - -// LoggerFromParamConfigAsString does the same as LoggerFromConfigAsString, but includes special parser options. -// See 'CfgParseParams' comments. 
-func LoggerFromParamConfigAsString(data string, parserParams *CfgParseParams) (LoggerInterface, error) { - return LoggerFromParamConfigAsBytes([]byte(data), parserParams) -} - -// LoggerFromWriterWithMinLevel is shortcut for LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat) -func LoggerFromWriterWithMinLevel(output io.Writer, minLevel LogLevel) (LoggerInterface, error) { - return LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat) -} - -// LoggerFromWriterWithMinLevelAndFormat creates a proxy logger that uses io.Writer as the -// receiver with minimal level = minLevel and with specified format. -// -// All messages with level more or equal to minLevel will be written to output and -// formatted using the default seelog format. -// -// Can be called for usage with non-Seelog systems -func LoggerFromWriterWithMinLevelAndFormat(output io.Writer, minLevel LogLevel, format string) (LoggerInterface, error) { - constraints, err := NewMinMaxConstraints(minLevel, CriticalLvl) - if err != nil { - return nil, err - } - formatter, err := NewFormatter(format) - if err != nil { - return nil, err - } - dispatcher, err := NewSplitDispatcher(formatter, []interface{}{output}) - if err != nil { - return nil, err - } - - conf, err := newFullLoggerConfig(constraints, make([]*LogLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil) - if err != nil { - return nil, err - } - - return createLoggerFromFullConfig(conf) -} - -// LoggerFromXMLDecoder creates logger with config from a XML decoder starting from a specific node. -// It should contain valid seelog xml, except for root node name. -func LoggerFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (LoggerInterface, error) { - conf, err := configFromXMLDecoder(xmlParser, rootNode) - if err != nil { - return nil, err - } - - return createLoggerFromFullConfig(conf) -} - -// LoggerFromCustomReceiver creates a proxy logger that uses a CustomReceiver as the -// receiver. 
-// -// All messages will be sent to the specified custom receiver without additional -// formatting ('%Msg' format is used). -// -// Check CustomReceiver, RegisterReceiver for additional info. -// -// NOTE 1: CustomReceiver.AfterParse is only called when a receiver is instantiated -// by the config parser while parsing config. So, if you are not planning to use the -// same CustomReceiver for both proxying (via LoggerFromCustomReceiver call) and -// loading from config, just leave AfterParse implementation empty. -// -// NOTE 2: Unlike RegisterReceiver, LoggerFromCustomReceiver takes an already initialized -// instance that implements CustomReceiver. So, fill it with data and perform any initialization -// logic before calling this func and it won't be lost. -// -// So: -// * RegisterReceiver takes value just to get the reflect.Type from it and then -// instantiate it as many times as config is reloaded. -// -// * LoggerFromCustomReceiver takes value and uses it without modification and -// reinstantiation, directy passing it to the dispatcher tree. 
-func LoggerFromCustomReceiver(receiver CustomReceiver) (LoggerInterface, error) { - constraints, err := NewMinMaxConstraints(TraceLvl, CriticalLvl) - if err != nil { - return nil, err - } - - output, err := NewCustomReceiverDispatcherByValue(msgonlyformatter, receiver, "user-proxy", CustomReceiverInitArgs{}) - if err != nil { - return nil, err - } - dispatcher, err := NewSplitDispatcher(msgonlyformatter, []interface{}{output}) - if err != nil { - return nil, err - } - - conf, err := newFullLoggerConfig(constraints, make([]*LogLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil) - if err != nil { - return nil, err - } - - return createLoggerFromFullConfig(conf) -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/cfg_errors.go b/Godeps/_workspace/src/github.com/cihub/seelog/cfg_errors.go deleted file mode 100644 index c1fb4d10..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/cfg_errors.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" -) - -var ( - errNodeMustHaveChildren = errors.New("node must have children") - errNodeCannotHaveChildren = errors.New("node cannot have children") -) - -type unexpectedChildElementError struct { - baseError -} - -func newUnexpectedChildElementError(msg string) *unexpectedChildElementError { - custmsg := "Unexpected child element: " + msg - return &unexpectedChildElementError{baseError{message: custmsg}} -} - -type missingArgumentError struct { - baseError -} - -func newMissingArgumentError(nodeName, attrName string) *missingArgumentError { - custmsg := "Output '" + nodeName + "' has no '" + attrName + "' attribute" - return &missingArgumentError{baseError{message: custmsg}} -} - -type unexpectedAttributeError struct { - baseError -} - -func newUnexpectedAttributeError(nodeName, attr string) *unexpectedAttributeError { - custmsg := nodeName + " has unexpected attribute: " + attr - return &unexpectedAttributeError{baseError{message: custmsg}} -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/cfg_logconfig.go b/Godeps/_workspace/src/github.com/cihub/seelog/cfg_logconfig.go deleted file mode 100644 index 6ba6f9a9..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/cfg_logconfig.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "errors" -) - -type loggerTypeFromString uint8 - -const ( - syncloggerTypeFromString = iota - asyncLooploggerTypeFromString - asyncTimerloggerTypeFromString - adaptiveLoggerTypeFromString - defaultloggerTypeFromString = asyncLooploggerTypeFromString -) - -const ( - syncloggerTypeFromStringStr = "sync" - asyncloggerTypeFromStringStr = "asyncloop" - asyncTimerloggerTypeFromStringStr = "asynctimer" - adaptiveLoggerTypeFromStringStr = "adaptive" -) - -// asyncTimerLoggerData represents specific data for async timer logger -type asyncTimerLoggerData struct { - AsyncInterval uint32 -} - -// adaptiveLoggerData represents specific data for adaptive timer logger -type adaptiveLoggerData struct { - MinInterval uint32 - MaxInterval uint32 - CriticalMsgCount uint32 -} - -var loggerTypeToStringRepresentations = map[loggerTypeFromString]string{ - syncloggerTypeFromString: syncloggerTypeFromStringStr, - asyncLooploggerTypeFromString: asyncloggerTypeFromStringStr, - asyncTimerloggerTypeFromString: asyncTimerloggerTypeFromStringStr, - adaptiveLoggerTypeFromString: adaptiveLoggerTypeFromStringStr, -} - -// getLoggerTypeFromString parses a string and returns a corresponding logger type, if successful. -func getLoggerTypeFromString(logTypeString string) (level loggerTypeFromString, found bool) { - for logType, logTypeStr := range loggerTypeToStringRepresentations { - if logTypeStr == logTypeString { - return logType, true - } - } - - return 0, false -} - -// logConfig stores logging configuration. 
Contains messages dispatcher, allowed log level rules -// (general constraints and exceptions) -type logConfig struct { - Constraints logLevelConstraints // General log level rules (>min and - - - - - -` - - conf, err := configFromReader(strings.NewReader(testConfig)) - if err != nil { - t.Errorf("parse error: %s\n", err.Error()) - return - } - - context, err := currentContext(nil) - if err != nil { - t.Errorf("cannot get current context:" + err.Error()) - return - } - firstContext, err := getFirstContext() - if err != nil { - t.Errorf("cannot get current context:" + err.Error()) - return - } - secondContext, err := getSecondContext() - if err != nil { - t.Errorf("cannot get current context:" + err.Error()) - return - } - - if !conf.IsAllowed(TraceLvl, context) { - t.Errorf("error: deny trace in current context") - } - if conf.IsAllowed(TraceLvl, firstContext) { - t.Errorf("error: allow trace in first context") - } - if conf.IsAllowed(ErrorLvl, context) { - t.Errorf("error: allow error in current context") - } - if !conf.IsAllowed(ErrorLvl, secondContext) { - t.Errorf("error: deny error in second context") - } - - // cache test - if !conf.IsAllowed(TraceLvl, context) { - t.Errorf("error: deny trace in current context") - } - if conf.IsAllowed(TraceLvl, firstContext) { - t.Errorf("error: allow trace in first context") - } - if conf.IsAllowed(ErrorLvl, context) { - t.Errorf("error: allow error in current context") - } - if !conf.IsAllowed(ErrorLvl, secondContext) { - t.Errorf("error: deny error in second context") - } -} - -func getFirstContext() (LogContextInterface, error) { - return currentContext(nil) -} - -func getSecondContext() (LogContextInterface, error) { - return currentContext(nil) -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/cfg_parser.go b/Godeps/_workspace/src/github.com/cihub/seelog/cfg_parser.go deleted file mode 100644 index 7fb9aabf..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/cfg_parser.go +++ /dev/null @@ -1,1238 
+0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "crypto/tls" - "encoding/xml" - "errors" - "fmt" - "io" - "strconv" - "strings" - "time" -) - -// Names of elements of seelog config. 
-const ( - seelogConfigID = "seelog" - outputsID = "outputs" - formatsID = "formats" - minLevelID = "minlevel" - maxLevelID = "maxlevel" - levelsID = "levels" - exceptionsID = "exceptions" - exceptionID = "exception" - funcPatternID = "funcpattern" - filePatternID = "filepattern" - formatID = "format" - formatAttrID = "format" - formatKeyAttrID = "id" - outputFormatID = "formatid" - pathID = "path" - fileWriterID = "file" - smtpWriterID = "smtp" - senderaddressID = "senderaddress" - senderNameID = "sendername" - recipientID = "recipient" - mailHeaderID = "header" - mailHeaderNameID = "name" - mailHeaderValueID = "value" - addressID = "address" - hostNameID = "hostname" - hostPortID = "hostport" - userNameID = "username" - userPassID = "password" - cACertDirpathID = "cacertdirpath" - subjectID = "subject" - splitterDispatcherID = "splitter" - consoleWriterID = "console" - customReceiverID = "custom" - customNameAttrID = "name" - customNameDataAttrPrefix = "data-" - filterDispatcherID = "filter" - filterLevelsAttrID = "levels" - rollingfileWriterID = "rollingfile" - rollingFileTypeAttr = "type" - rollingFilePathAttr = "filename" - rollingFileMaxSizeAttr = "maxsize" - rollingFileMaxRollsAttr = "maxrolls" - rollingFileNameModeAttr = "namemode" - rollingFileDataPatternAttr = "datepattern" - rollingFileArchiveAttr = "archivetype" - rollingFileArchivePathAttr = "archivepath" - bufferedWriterID = "buffered" - bufferedSizeAttr = "size" - bufferedFlushPeriodAttr = "flushperiod" - loggerTypeFromStringAttr = "type" - asyncLoggerIntervalAttr = "asyncinterval" - adaptLoggerMinIntervalAttr = "mininterval" - adaptLoggerMaxIntervalAttr = "maxinterval" - adaptLoggerCriticalMsgCountAttr = "critmsgcount" - predefinedPrefix = "std:" - connWriterID = "conn" - connWriterAddrAttr = "addr" - connWriterNetAttr = "net" - connWriterReconnectOnMsgAttr = "reconnectonmsg" - connWriterUseTLSAttr = "tls" - connWriterInsecureSkipVerifyAttr = "insecureskipverify" -) - -// CustomReceiverProducer is 
the signature of the function CfgParseParams needs to create -// custom receivers. -type CustomReceiverProducer func(CustomReceiverInitArgs) (CustomReceiver, error) - -// CfgParseParams represent specific parse options or flags used by parser. It is used if seelog parser needs -// some special directives or additional info to correctly parse a config. -type CfgParseParams struct { - // CustomReceiverProducers expose the same functionality as RegisterReceiver func - // but only in the scope (context) of the config parse func instead of a global package scope. - // - // It means that if you use custom receivers in your code, you may either register them globally once with - // RegisterReceiver or you may call funcs like LoggerFromParamConfigAsFile (with 'ParamConfig') - // and use CustomReceiverProducers to provide custom producer funcs. - // - // A producer func is called when config parser processes a '' element. It takes the 'name' attribute - // of the element and tries to find a match in two places: - // 1) CfgParseParams.CustomReceiverProducers map - // 2) Global type map, filled by RegisterReceiver - // - // If a match is found in the CustomReceiverProducers map, parser calls the corresponding producer func - // passing the init args to it. The func takes exactly the same args as CustomReceiver.AfterParse. - // The producer func must return a correct receiver or an error. If case of error, seelog will behave - // in the same way as with any other config error. - // - // You may use this param to set custom producers in case you need to pass some context when instantiating - // a custom receiver or if you frequently change custom receivers with different parameters or in any other - // situation where package-level registering (RegisterReceiver) is not an option for you. 
- CustomReceiverProducers map[string]CustomReceiverProducer -} - -func (cfg *CfgParseParams) String() string { - return fmt.Sprintf("CfgParams: {custom_recs=%d}", len(cfg.CustomReceiverProducers)) -} - -type elementMapEntry struct { - constructor func(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) -} - -var elementMap map[string]elementMapEntry -var predefinedFormats map[string]*formatter - -func init() { - elementMap = map[string]elementMapEntry{ - fileWriterID: {createfileWriter}, - splitterDispatcherID: {createSplitter}, - customReceiverID: {createCustomReceiver}, - filterDispatcherID: {createFilter}, - consoleWriterID: {createConsoleWriter}, - rollingfileWriterID: {createRollingFileWriter}, - bufferedWriterID: {createbufferedWriter}, - smtpWriterID: {createSMTPWriter}, - connWriterID: {createconnWriter}, - } - - err := fillPredefinedFormats() - if err != nil { - panic(fmt.Sprintf("Seelog couldn't start: predefined formats creation failed. Error: %s", err.Error())) - } -} - -func fillPredefinedFormats() error { - predefinedFormatsWithoutPrefix := map[string]string{ - "xml-debug": `%Lev%Msg%RelFile%Func%Line`, - "xml-debug-short": `%Ns%l%Msg

%RelFile

%Func`, - "xml": `%Lev%Msg`, - "xml-short": `%Ns%l%Msg`, - - "json-debug": `{"time":%Ns,"lev":"%Lev","msg":"%Msg","path":"%RelFile","func":"%Func","line":"%Line"}`, - "json-debug-short": `{"t":%Ns,"l":"%Lev","m":"%Msg","p":"%RelFile","f":"%Func"}`, - "json": `{"time":%Ns,"lev":"%Lev","msg":"%Msg"}`, - "json-short": `{"t":%Ns,"l":"%Lev","m":"%Msg"}`, - - "debug": `[%LEVEL] %RelFile:%Func.%Line %Date %Time %Msg%n`, - "debug-short": `[%LEVEL] %Date %Time %Msg%n`, - "fast": `%Ns %l %Msg%n`, - } - - predefinedFormats = make(map[string]*formatter) - - for formatKey, format := range predefinedFormatsWithoutPrefix { - formatter, err := NewFormatter(format) - if err != nil { - return err - } - - predefinedFormats[predefinedPrefix+formatKey] = formatter - } - - return nil -} - -// configFromXMLDecoder parses data from a given XML decoder. -// Returns parsed config which can be used to create logger in case no errors occured. -// Returns error if format is incorrect or anything happened. -func configFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (*configForParsing, error) { - return configFromXMLDecoderWithConfig(xmlParser, rootNode, nil) -} - -// configFromXMLDecoderWithConfig parses data from a given XML decoder. -// Returns parsed config which can be used to create logger in case no errors occured. -// Returns error if format is incorrect or anything happened. -func configFromXMLDecoderWithConfig(xmlParser *xml.Decoder, rootNode xml.Token, cfg *CfgParseParams) (*configForParsing, error) { - _, ok := rootNode.(xml.StartElement) - if !ok { - return nil, errors.New("rootNode must be XML startElement") - } - - config, err := unmarshalNode(xmlParser, rootNode) - if err != nil { - return nil, err - } - if config == nil { - return nil, errors.New("xml has no content") - } - - return configFromXMLNodeWithConfig(config, cfg) -} - -// configFromReader parses data from a given reader. -// Returns parsed config which can be used to create logger in case no errors occured. 
-// Returns error if format is incorrect or anything happened. -func configFromReader(reader io.Reader) (*configForParsing, error) { - return configFromReaderWithConfig(reader, nil) -} - -// configFromReaderWithConfig parses data from a given reader. -// Returns parsed config which can be used to create logger in case no errors occured. -// Returns error if format is incorrect or anything happened. -func configFromReaderWithConfig(reader io.Reader, cfg *CfgParseParams) (*configForParsing, error) { - config, err := unmarshalConfig(reader) - if err != nil { - return nil, err - } - - if config.name != seelogConfigID { - return nil, errors.New("root xml tag must be '" + seelogConfigID + "'") - } - - return configFromXMLNodeWithConfig(config, cfg) -} - -func configFromXMLNodeWithConfig(config *xmlNode, cfg *CfgParseParams) (*configForParsing, error) { - err := checkUnexpectedAttribute( - config, - minLevelID, - maxLevelID, - levelsID, - loggerTypeFromStringAttr, - asyncLoggerIntervalAttr, - adaptLoggerMinIntervalAttr, - adaptLoggerMaxIntervalAttr, - adaptLoggerCriticalMsgCountAttr, - ) - if err != nil { - return nil, err - } - - err = checkExpectedElements(config, optionalElement(outputsID), optionalElement(formatsID), optionalElement(exceptionsID)) - if err != nil { - return nil, err - } - - constraints, err := getConstraints(config) - if err != nil { - return nil, err - } - - exceptions, err := getExceptions(config) - if err != nil { - return nil, err - } - err = checkDistinctExceptions(exceptions) - if err != nil { - return nil, err - } - - formats, err := getFormats(config) - if err != nil { - return nil, err - } - - dispatcher, err := getOutputsTree(config, formats, cfg) - if err != nil { - // If we open several files, but then fail to parse the config, we should close - // those files before reporting that config is invalid. 
- if dispatcher != nil { - dispatcher.Close() - } - - return nil, err - } - - loggerType, logData, err := getloggerTypeFromStringData(config) - if err != nil { - return nil, err - } - - return newFullLoggerConfig(constraints, exceptions, dispatcher, loggerType, logData, cfg) -} - -func getConstraints(node *xmlNode) (logLevelConstraints, error) { - minLevelStr, isMinLevel := node.attributes[minLevelID] - maxLevelStr, isMaxLevel := node.attributes[maxLevelID] - levelsStr, isLevels := node.attributes[levelsID] - - if isLevels && (isMinLevel && isMaxLevel) { - return nil, errors.New("for level declaration use '" + levelsID + "'' OR '" + minLevelID + - "', '" + maxLevelID + "'") - } - - offString := LogLevel(Off).String() - - if (isLevels && strings.TrimSpace(levelsStr) == offString) || - (isMinLevel && !isMaxLevel && minLevelStr == offString) { - - return NewOffConstraints() - } - - if isLevels { - levels, err := parseLevels(levelsStr) - if err != nil { - return nil, err - } - return NewListConstraints(levels) - } - - var minLevel = LogLevel(TraceLvl) - if isMinLevel { - found := true - minLevel, found = LogLevelFromString(minLevelStr) - if !found { - return nil, errors.New("declared " + minLevelID + " not found: " + minLevelStr) - } - } - - var maxLevel = LogLevel(CriticalLvl) - if isMaxLevel { - found := true - maxLevel, found = LogLevelFromString(maxLevelStr) - if !found { - return nil, errors.New("declared " + maxLevelID + " not found: " + maxLevelStr) - } - } - - return NewMinMaxConstraints(minLevel, maxLevel) -} - -func parseLevels(str string) ([]LogLevel, error) { - levelsStrArr := strings.Split(strings.Replace(str, " ", "", -1), ",") - var levels []LogLevel - for _, levelStr := range levelsStrArr { - level, found := LogLevelFromString(levelStr) - if !found { - return nil, errors.New("declared level not found: " + levelStr) - } - - levels = append(levels, level) - } - - return levels, nil -} - -func getExceptions(config *xmlNode) ([]*LogLevelException, error) { 
- var exceptions []*LogLevelException - - var exceptionsNode *xmlNode - for _, child := range config.children { - if child.name == exceptionsID { - exceptionsNode = child - break - } - } - - if exceptionsNode == nil { - return exceptions, nil - } - - err := checkUnexpectedAttribute(exceptionsNode) - if err != nil { - return nil, err - } - - err = checkExpectedElements(exceptionsNode, multipleMandatoryElements("exception")) - if err != nil { - return nil, err - } - - for _, exceptionNode := range exceptionsNode.children { - if exceptionNode.name != exceptionID { - return nil, errors.New("incorrect nested element in exceptions section: " + exceptionNode.name) - } - - err := checkUnexpectedAttribute(exceptionNode, minLevelID, maxLevelID, levelsID, funcPatternID, filePatternID) - if err != nil { - return nil, err - } - - constraints, err := getConstraints(exceptionNode) - if err != nil { - return nil, errors.New("incorrect " + exceptionsID + " node: " + err.Error()) - } - - funcPattern, isFuncPattern := exceptionNode.attributes[funcPatternID] - filePattern, isFilePattern := exceptionNode.attributes[filePatternID] - if !isFuncPattern { - funcPattern = "*" - } - if !isFilePattern { - filePattern = "*" - } - - exception, err := NewLogLevelException(funcPattern, filePattern, constraints) - if err != nil { - return nil, errors.New("incorrect exception node: " + err.Error()) - } - - exceptions = append(exceptions, exception) - } - - return exceptions, nil -} - -func checkDistinctExceptions(exceptions []*LogLevelException) error { - for i, exception := range exceptions { - for j, exception1 := range exceptions { - if i == j { - continue - } - - if exception.FuncPattern() == exception1.FuncPattern() && - exception.FilePattern() == exception1.FilePattern() { - - return fmt.Errorf("there are two or more duplicate exceptions. 
Func: %v, file %v", - exception.FuncPattern(), exception.FilePattern()) - } - } - } - - return nil -} - -func getFormats(config *xmlNode) (map[string]*formatter, error) { - formats := make(map[string]*formatter, 0) - - var formatsNode *xmlNode - for _, child := range config.children { - if child.name == formatsID { - formatsNode = child - break - } - } - - if formatsNode == nil { - return formats, nil - } - - err := checkUnexpectedAttribute(formatsNode) - if err != nil { - return nil, err - } - - err = checkExpectedElements(formatsNode, multipleMandatoryElements("format")) - if err != nil { - return nil, err - } - - for _, formatNode := range formatsNode.children { - if formatNode.name != formatID { - return nil, errors.New("incorrect nested element in " + formatsID + " section: " + formatNode.name) - } - - err := checkUnexpectedAttribute(formatNode, formatKeyAttrID, formatID) - if err != nil { - return nil, err - } - - id, isID := formatNode.attributes[formatKeyAttrID] - formatStr, isFormat := formatNode.attributes[formatAttrID] - if !isID { - return nil, errors.New("format has no '" + formatKeyAttrID + "' attribute") - } - if !isFormat { - return nil, errors.New("format[" + id + "] has no '" + formatAttrID + "' attribute") - } - - formatter, err := NewFormatter(formatStr) - if err != nil { - return nil, err - } - - formats[id] = formatter - } - - return formats, nil -} - -func getloggerTypeFromStringData(config *xmlNode) (logType loggerTypeFromString, logData interface{}, err error) { - logTypeStr, loggerTypeExists := config.attributes[loggerTypeFromStringAttr] - - if !loggerTypeExists { - return defaultloggerTypeFromString, nil, nil - } - - logType, found := getLoggerTypeFromString(logTypeStr) - - if !found { - return 0, nil, fmt.Errorf("unknown logger type: %s", logTypeStr) - } - - if logType == asyncTimerloggerTypeFromString { - intervalStr, intervalExists := config.attributes[asyncLoggerIntervalAttr] - if !intervalExists { - return 0, nil, 
newMissingArgumentError(config.name, asyncLoggerIntervalAttr) - } - - interval, err := strconv.ParseUint(intervalStr, 10, 32) - if err != nil { - return 0, nil, err - } - - logData = asyncTimerLoggerData{uint32(interval)} - } else if logType == adaptiveLoggerTypeFromString { - - // Min interval - minIntStr, minIntExists := config.attributes[adaptLoggerMinIntervalAttr] - if !minIntExists { - return 0, nil, newMissingArgumentError(config.name, adaptLoggerMinIntervalAttr) - } - minInterval, err := strconv.ParseUint(minIntStr, 10, 32) - if err != nil { - return 0, nil, err - } - - // Max interval - maxIntStr, maxIntExists := config.attributes[adaptLoggerMaxIntervalAttr] - if !maxIntExists { - return 0, nil, newMissingArgumentError(config.name, adaptLoggerMaxIntervalAttr) - } - maxInterval, err := strconv.ParseUint(maxIntStr, 10, 32) - if err != nil { - return 0, nil, err - } - - // Critical msg count - criticalMsgCountStr, criticalMsgCountExists := config.attributes[adaptLoggerCriticalMsgCountAttr] - if !criticalMsgCountExists { - return 0, nil, newMissingArgumentError(config.name, adaptLoggerCriticalMsgCountAttr) - } - criticalMsgCount, err := strconv.ParseUint(criticalMsgCountStr, 10, 32) - if err != nil { - return 0, nil, err - } - - logData = adaptiveLoggerData{uint32(minInterval), uint32(maxInterval), uint32(criticalMsgCount)} - } - - return logType, logData, nil -} - -func getOutputsTree(config *xmlNode, formats map[string]*formatter, cfg *CfgParseParams) (dispatcherInterface, error) { - var outputsNode *xmlNode - for _, child := range config.children { - if child.name == outputsID { - outputsNode = child - break - } - } - - if outputsNode != nil { - err := checkUnexpectedAttribute(outputsNode, outputFormatID) - if err != nil { - return nil, err - } - - formatter, err := getCurrentFormat(outputsNode, DefaultFormatter, formats) - if err != nil { - return nil, err - } - - output, err := createSplitter(outputsNode, formatter, formats, cfg) - if err != nil { - return 
nil, err - } - - dispatcher, ok := output.(dispatcherInterface) - if ok { - return dispatcher, nil - } - } - - console, err := NewConsoleWriter() - if err != nil { - return nil, err - } - return NewSplitDispatcher(DefaultFormatter, []interface{}{console}) -} - -func getCurrentFormat(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter) (*formatter, error) { - formatID, isFormatID := node.attributes[outputFormatID] - if !isFormatID { - return formatFromParent, nil - } - - format, ok := formats[formatID] - if ok { - return format, nil - } - - // Test for predefined format match - pdFormat, pdOk := predefinedFormats[formatID] - - if !pdOk { - return nil, errors.New("formatid = '" + formatID + "' doesn't exist") - } - - return pdFormat, nil -} - -func createInnerReceivers(node *xmlNode, format *formatter, formats map[string]*formatter, cfg *CfgParseParams) ([]interface{}, error) { - var outputs []interface{} - for _, childNode := range node.children { - entry, ok := elementMap[childNode.name] - if !ok { - return nil, errors.New("unnknown tag '" + childNode.name + "' in outputs section") - } - - output, err := entry.constructor(childNode, format, formats, cfg) - if err != nil { - return nil, err - } - - outputs = append(outputs, output) - } - - return outputs, nil -} - -func createSplitter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - err := checkUnexpectedAttribute(node, outputFormatID) - if err != nil { - return nil, err - } - - if !node.hasChildren() { - return nil, errNodeMustHaveChildren - } - - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - - receivers, err := createInnerReceivers(node, currentFormat, formats, cfg) - if err != nil { - return nil, err - } - - return NewSplitDispatcher(currentFormat, receivers) -} - -func createCustomReceiver(node *xmlNode, formatFromParent *formatter, formats 
map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - dataCustomPrefixes := make(map[string]string) - // Expecting only 'formatid', 'name' and 'data-' attrs - for attr, attrval := range node.attributes { - isExpected := false - if attr == outputFormatID || - attr == customNameAttrID { - isExpected = true - } - if strings.HasPrefix(attr, customNameDataAttrPrefix) { - dataCustomPrefixes[attr[len(customNameDataAttrPrefix):]] = attrval - isExpected = true - } - if !isExpected { - return nil, newUnexpectedAttributeError(node.name, attr) - } - } - - if node.hasChildren() { - return nil, errNodeCannotHaveChildren - } - customName, hasCustomName := node.attributes[customNameAttrID] - if !hasCustomName { - return nil, newMissingArgumentError(node.name, customNameAttrID) - } - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - args := CustomReceiverInitArgs{ - XmlCustomAttrs: dataCustomPrefixes, - } - - if cfg != nil && cfg.CustomReceiverProducers != nil { - if prod, ok := cfg.CustomReceiverProducers[customName]; ok { - rec, err := prod(args) - if err != nil { - return nil, err - } - creceiver, err := NewCustomReceiverDispatcherByValue(currentFormat, rec, customName, args) - if err != nil { - return nil, err - } - err = rec.AfterParse(args) - if err != nil { - return nil, err - } - return creceiver, nil - } - } - - return NewCustomReceiverDispatcher(currentFormat, customName, args) -} - -func createFilter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - err := checkUnexpectedAttribute(node, outputFormatID, filterLevelsAttrID) - if err != nil { - return nil, err - } - - if !node.hasChildren() { - return nil, errNodeMustHaveChildren - } - - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - - levelsStr, isLevels := node.attributes[filterLevelsAttrID] - if !isLevels { 
- return nil, newMissingArgumentError(node.name, filterLevelsAttrID) - } - - levels, err := parseLevels(levelsStr) - if err != nil { - return nil, err - } - - receivers, err := createInnerReceivers(node, currentFormat, formats, cfg) - if err != nil { - return nil, err - } - - return NewFilterDispatcher(currentFormat, receivers, levels...) -} - -func createfileWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - err := checkUnexpectedAttribute(node, outputFormatID, pathID) - if err != nil { - return nil, err - } - - if node.hasChildren() { - return nil, errNodeCannotHaveChildren - } - - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - - path, isPath := node.attributes[pathID] - if !isPath { - return nil, newMissingArgumentError(node.name, pathID) - } - - fileWriter, err := NewFileWriter(path) - if err != nil { - return nil, err - } - - return NewFormattedWriter(fileWriter, currentFormat) -} - -// Creates new SMTP writer if encountered in the config file. -func createSMTPWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - err := checkUnexpectedAttribute(node, outputFormatID, senderaddressID, senderNameID, hostNameID, hostPortID, userNameID, userPassID, subjectID) - if err != nil { - return nil, err - } - // Node must have children. 
- if !node.hasChildren() { - return nil, errNodeMustHaveChildren - } - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - senderAddress, ok := node.attributes[senderaddressID] - if !ok { - return nil, newMissingArgumentError(node.name, senderaddressID) - } - senderName, ok := node.attributes[senderNameID] - if !ok { - return nil, newMissingArgumentError(node.name, senderNameID) - } - // Process child nodes scanning for recipient email addresses and/or CA certificate paths. - var recipientAddresses []string - var caCertDirPaths []string - var mailHeaders []string - for _, childNode := range node.children { - switch childNode.name { - // Extract recipient address from child nodes. - case recipientID: - address, ok := childNode.attributes[addressID] - if !ok { - return nil, newMissingArgumentError(childNode.name, addressID) - } - recipientAddresses = append(recipientAddresses, address) - // Extract CA certificate file path from child nodes. - case cACertDirpathID: - path, ok := childNode.attributes[pathID] - if !ok { - return nil, newMissingArgumentError(childNode.name, pathID) - } - caCertDirPaths = append(caCertDirPaths, path) - - // Extract email headers from child nodes. 
- case mailHeaderID: - headerName, ok := childNode.attributes[mailHeaderNameID] - if !ok { - return nil, newMissingArgumentError(childNode.name, mailHeaderNameID) - } - - headerValue, ok := childNode.attributes[mailHeaderValueID] - if !ok { - return nil, newMissingArgumentError(childNode.name, mailHeaderValueID) - } - - // Build header line - mailHeaders = append(mailHeaders, fmt.Sprintf("%s: %s", headerName, headerValue)) - default: - return nil, newUnexpectedChildElementError(childNode.name) - } - } - hostName, ok := node.attributes[hostNameID] - if !ok { - return nil, newMissingArgumentError(node.name, hostNameID) - } - - hostPort, ok := node.attributes[hostPortID] - if !ok { - return nil, newMissingArgumentError(node.name, hostPortID) - } - - // Check if the string can really be converted into int. - if _, err := strconv.Atoi(hostPort); err != nil { - return nil, errors.New("invalid host port number") - } - - userName, ok := node.attributes[userNameID] - if !ok { - return nil, newMissingArgumentError(node.name, userNameID) - } - - userPass, ok := node.attributes[userPassID] - if !ok { - return nil, newMissingArgumentError(node.name, userPassID) - } - - // subject is optionally set by configuration. 
- // default value is defined by DefaultSubjectPhrase constant in the writers_smtpwriter.go - var subjectPhrase = DefaultSubjectPhrase - - subject, ok := node.attributes[subjectID] - if ok { - subjectPhrase = subject - } - - smtpWriter := NewSMTPWriter( - senderAddress, - senderName, - recipientAddresses, - hostName, - hostPort, - userName, - userPass, - caCertDirPaths, - subjectPhrase, - mailHeaders, - ) - - return NewFormattedWriter(smtpWriter, currentFormat) -} - -func createConsoleWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - err := checkUnexpectedAttribute(node, outputFormatID) - if err != nil { - return nil, err - } - - if node.hasChildren() { - return nil, errNodeCannotHaveChildren - } - - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - - consoleWriter, err := NewConsoleWriter() - if err != nil { - return nil, err - } - - return NewFormattedWriter(consoleWriter, currentFormat) -} - -func createconnWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - if node.hasChildren() { - return nil, errNodeCannotHaveChildren - } - - err := checkUnexpectedAttribute(node, outputFormatID, connWriterAddrAttr, connWriterNetAttr, connWriterReconnectOnMsgAttr, connWriterUseTLSAttr, connWriterInsecureSkipVerifyAttr) - if err != nil { - return nil, err - } - - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - - addr, isAddr := node.attributes[connWriterAddrAttr] - if !isAddr { - return nil, newMissingArgumentError(node.name, connWriterAddrAttr) - } - - net, isNet := node.attributes[connWriterNetAttr] - if !isNet { - return nil, newMissingArgumentError(node.name, connWriterNetAttr) - } - - reconnectOnMsg := false - reconnectOnMsgStr, isReconnectOnMsgStr := 
node.attributes[connWriterReconnectOnMsgAttr] - if isReconnectOnMsgStr { - if reconnectOnMsgStr == "true" { - reconnectOnMsg = true - } else if reconnectOnMsgStr == "false" { - reconnectOnMsg = false - } else { - return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterReconnectOnMsgAttr + "' attribute value") - } - } - - useTLS := false - useTLSStr, isUseTLSStr := node.attributes[connWriterUseTLSAttr] - if isUseTLSStr { - if useTLSStr == "true" { - useTLS = true - } else if useTLSStr == "false" { - useTLS = false - } else { - return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterUseTLSAttr + "' attribute value") - } - if useTLS { - insecureSkipVerify := false - insecureSkipVerifyStr, isInsecureSkipVerify := node.attributes[connWriterInsecureSkipVerifyAttr] - if isInsecureSkipVerify { - if insecureSkipVerifyStr == "true" { - insecureSkipVerify = true - } else if insecureSkipVerifyStr == "false" { - insecureSkipVerify = false - } else { - return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterInsecureSkipVerifyAttr + "' attribute value") - } - } - config := tls.Config{InsecureSkipVerify: insecureSkipVerify} - connWriter := newTLSWriter(net, addr, reconnectOnMsg, &config) - return NewFormattedWriter(connWriter, currentFormat) - } - } - - connWriter := NewConnWriter(net, addr, reconnectOnMsg) - - return NewFormattedWriter(connWriter, currentFormat) -} - -func createRollingFileWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - if node.hasChildren() { - return nil, errNodeCannotHaveChildren - } - - rollingTypeStr, isRollingType := node.attributes[rollingFileTypeAttr] - if !isRollingType { - return nil, newMissingArgumentError(node.name, rollingFileTypeAttr) - } - - rollingType, ok := rollingTypeFromString(rollingTypeStr) - if !ok { - return nil, errors.New("unknown rolling file type: " + rollingTypeStr) - } - - currentFormat, 
err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - - path, isPath := node.attributes[rollingFilePathAttr] - if !isPath { - return nil, newMissingArgumentError(node.name, rollingFilePathAttr) - } - - rollingArchiveStr, archiveAttrExists := node.attributes[rollingFileArchiveAttr] - - var rArchiveType rollingArchiveType - var rArchivePath string - if !archiveAttrExists { - rArchiveType = rollingArchiveNone - rArchivePath = "" - } else { - rArchiveType, ok = rollingArchiveTypeFromString(rollingArchiveStr) - if !ok { - return nil, errors.New("unknown rolling archive type: " + rollingArchiveStr) - } - - if rArchiveType == rollingArchiveNone { - rArchivePath = "" - } else { - rArchivePath, ok = node.attributes[rollingFileArchivePathAttr] - if !ok { - rArchivePath, ok = rollingArchiveTypesDefaultNames[rArchiveType] - if !ok { - return nil, fmt.Errorf("cannot get default filename for archive type = %v", - rArchiveType) - } - } - } - } - - nameMode := rollingNameMode(rollingNameModePostfix) - nameModeStr, ok := node.attributes[rollingFileNameModeAttr] - if ok { - mode, found := rollingNameModeFromString(nameModeStr) - if !found { - return nil, errors.New("unknown rolling filename mode: " + nameModeStr) - } else { - nameMode = mode - } - } - - if rollingType == rollingTypeSize { - err := checkUnexpectedAttribute(node, outputFormatID, rollingFileTypeAttr, rollingFilePathAttr, - rollingFileMaxSizeAttr, rollingFileMaxRollsAttr, rollingFileArchiveAttr, - rollingFileArchivePathAttr, rollingFileNameModeAttr) - if err != nil { - return nil, err - } - - maxSizeStr, ok := node.attributes[rollingFileMaxSizeAttr] - if !ok { - return nil, newMissingArgumentError(node.name, rollingFileMaxSizeAttr) - } - - maxSize, err := strconv.ParseInt(maxSizeStr, 10, 64) - if err != nil { - return nil, err - } - - maxRolls := 0 - maxRollsStr, ok := node.attributes[rollingFileMaxRollsAttr] - if ok { - maxRolls, err = strconv.Atoi(maxRollsStr) - if err != 
nil { - return nil, err - } - } - - rollingWriter, err := NewRollingFileWriterSize(path, rArchiveType, rArchivePath, maxSize, maxRolls, nameMode) - if err != nil { - return nil, err - } - - return NewFormattedWriter(rollingWriter, currentFormat) - - } else if rollingType == rollingTypeTime { - err := checkUnexpectedAttribute(node, outputFormatID, rollingFileTypeAttr, rollingFilePathAttr, - rollingFileDataPatternAttr, rollingFileArchiveAttr, rollingFileMaxRollsAttr, - rollingFileArchivePathAttr, rollingFileNameModeAttr) - if err != nil { - return nil, err - } - - maxRolls := 0 - maxRollsStr, ok := node.attributes[rollingFileMaxRollsAttr] - if ok { - maxRolls, err = strconv.Atoi(maxRollsStr) - if err != nil { - return nil, err - } - } - - dataPattern, ok := node.attributes[rollingFileDataPatternAttr] - if !ok { - return nil, newMissingArgumentError(node.name, rollingFileDataPatternAttr) - } - - rollingWriter, err := NewRollingFileWriterTime(path, rArchiveType, rArchivePath, maxRolls, dataPattern, rollingIntervalAny, nameMode) - if err != nil { - return nil, err - } - - return NewFormattedWriter(rollingWriter, currentFormat) - } - - return nil, errors.New("incorrect rolling writer type " + rollingTypeStr) -} - -func createbufferedWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { - err := checkUnexpectedAttribute(node, outputFormatID, bufferedSizeAttr, bufferedFlushPeriodAttr) - if err != nil { - return nil, err - } - - if !node.hasChildren() { - return nil, errNodeMustHaveChildren - } - - currentFormat, err := getCurrentFormat(node, formatFromParent, formats) - if err != nil { - return nil, err - } - - sizeStr, isSize := node.attributes[bufferedSizeAttr] - if !isSize { - return nil, newMissingArgumentError(node.name, bufferedSizeAttr) - } - - size, err := strconv.Atoi(sizeStr) - if err != nil { - return nil, err - } - - flushPeriod := 0 - flushPeriodStr, isFlushPeriod := 
node.attributes[bufferedFlushPeriodAttr] - if isFlushPeriod { - flushPeriod, err = strconv.Atoi(flushPeriodStr) - if err != nil { - return nil, err - } - } - - // Inner writer couldn't have its own format, so we pass 'currentFormat' as its parent format - receivers, err := createInnerReceivers(node, currentFormat, formats, cfg) - if err != nil { - return nil, err - } - - formattedWriter, ok := receivers[0].(*formattedWriter) - if !ok { - return nil, errors.New("buffered writer's child is not writer") - } - - // ... and then we check that it hasn't changed - if formattedWriter.Format() != currentFormat { - return nil, errors.New("inner writer cannot have his own format") - } - - bufferedWriter, err := NewBufferedWriter(formattedWriter.Writer(), size, time.Duration(flushPeriod)) - if err != nil { - return nil, err - } - - return NewFormattedWriter(bufferedWriter, currentFormat) -} - -// Returns an error if node has any attributes not listed in expectedAttrs. -func checkUnexpectedAttribute(node *xmlNode, expectedAttrs ...string) error { - for attr := range node.attributes { - isExpected := false - for _, expected := range expectedAttrs { - if attr == expected { - isExpected = true - break - } - } - if !isExpected { - return newUnexpectedAttributeError(node.name, attr) - } - } - - return nil -} - -type expectedElementInfo struct { - name string - mandatory bool - multiple bool -} - -func optionalElement(name string) expectedElementInfo { - return expectedElementInfo{name, false, false} -} -func mandatoryElement(name string) expectedElementInfo { - return expectedElementInfo{name, true, false} -} -func multipleElements(name string) expectedElementInfo { - return expectedElementInfo{name, false, true} -} -func multipleMandatoryElements(name string) expectedElementInfo { - return expectedElementInfo{name, true, true} -} - -func checkExpectedElements(node *xmlNode, elements ...expectedElementInfo) error { - for _, element := range elements { - count := 0 - for _, child := 
range node.children { - if child.name == element.name { - count++ - } - } - - if count == 0 && element.mandatory { - return errors.New(node.name + " does not have mandatory subnode - " + element.name) - } - if count > 1 && !element.multiple { - return errors.New(node.name + " has more then one subnode - " + element.name) - } - } - - for _, child := range node.children { - isExpected := false - for _, element := range elements { - if child.name == element.name { - isExpected = true - } - } - - if !isExpected { - return errors.New(node.name + " has unexpected child: " + child.name) - } - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/cfg_parser_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/cfg_parser_test.go deleted file mode 100644 index 9c6f9b87..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/cfg_parser_test.go +++ /dev/null @@ -1,1089 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "fmt" - "path/filepath" - "regexp" - "strings" - "testing" -) - -type customTestReceiverOutput struct { - initCalled bool - dataPassed string - messageOutput string - levelOutput LogLevel - closed bool - flushed bool -} -type customTestReceiver struct{ co *customTestReceiverOutput } - -func (cr *customTestReceiver) ReceiveMessage(message string, level LogLevel, context LogContextInterface) error { - cr.co.messageOutput = message - cr.co.levelOutput = level - return nil -} - -func (cr *customTestReceiver) String() string { - return fmt.Sprintf("custom data='%s'", cr.co.dataPassed) -} - -func (cr *customTestReceiver) AfterParse(initArgs CustomReceiverInitArgs) error { - cr.co = new(customTestReceiverOutput) - cr.co.initCalled = true - cr.co.dataPassed = initArgs.XmlCustomAttrs["test"] - return nil -} - -func (cr *customTestReceiver) Flush() { - cr.co.flushed = true -} - -func (cr *customTestReceiver) Close() error { - cr.co.closed = true - return nil -} - -var re = regexp.MustCompile(`[^a-zA-Z0-9]+`) - -func getTestFileName(testName, postfix string) string { - if len(postfix) != 0 { - return strings.ToLower(re.ReplaceAllString(testName, "_")) + "_" + postfix + "_test.log" - } - return strings.ToLower(re.ReplaceAllString(testName, "_")) + "_test.log" -} - -var parserTests []parserTest - -type parserTest struct { - testName string - config string - expected *configForParsing //interface{} - errorExpected bool 
- parserConfig *CfgParseParams -} - -func getParserTests() []parserTest { - if parserTests == nil { - parserTests = make([]parserTest, 0) - - testName := "Simple file output" - testLogFileName := getTestFileName(testName, "") - testConfig := ` - - - - - - ` - testExpected := new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testfileWriter, _ := NewFileWriter(testLogFileName) - testHeadSplitter, _ := NewSplitDispatcher(DefaultFormatter, []interface{}{testfileWriter}) - testExpected.LogType = asyncLooploggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Filter dispatcher" - testLogFileName = getTestFileName(testName, "") - testConfig = ` - - - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testfileWriter, _ = NewFileWriter(testLogFileName) - testFilter, _ := NewFilterDispatcher(DefaultFormatter, []interface{}{testfileWriter}, DebugLvl, InfoLvl, CriticalLvl) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testFilter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Console writer" - testConfig = ` - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testconsoleWriter, _ := NewConsoleWriter() - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testconsoleWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, 
testExpected, false, nil}) - - testName = "SMTP writer" - testConfig = ` - - - - - - - - - - - - ` - - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testSMTPWriter := NewSMTPWriter( - "sa", - "sn", - []string{"ra1", "ra2", "ra3"}, - "hn", - "123", - "un", - "up", - []string{"cacdp1", "cacdp2"}, - DefaultSubjectPhrase, - nil, - ) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testSMTPWriter}) - testExpected.LogType = asyncLooploggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "SMTP writer custom header and subject configuration" - testConfig = ` - - - - - -
-
-
-
- - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testSMTPWriter = NewSMTPWriter( - "sa", - "sn", - []string{"ra1"}, - "hn", - "123", - "un", - "up", - []string{"cacdp1"}, - "ohlala", - []string{"Priority: Urgent", "Importance: high", "Sensitivity: Company-Confidential", "Auto-Submitted: auto-generated"}, - ) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testSMTPWriter}) - testExpected.LogType = asyncLooploggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Default output" - testConfig = ` - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testconsoleWriter, _ = NewConsoleWriter() - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testconsoleWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Asyncloop behavior" - testConfig = ` - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testconsoleWriter, _ = NewConsoleWriter() - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testconsoleWriter}) - testExpected.LogType = asyncLooploggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Asynctimer behavior" - testConfig = ` - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil 
- testconsoleWriter, _ = NewConsoleWriter() - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testconsoleWriter}) - testExpected.LogType = asyncTimerloggerTypeFromString - testExpected.LoggerData = asyncTimerLoggerData{101} - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Rolling file writer size" - testLogFileName = getTestFileName(testName, "") - testConfig = ` - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testrollingFileWriter, _ := NewRollingFileWriterSize(testLogFileName, rollingArchiveNone, "", 100, 5, rollingNameModePostfix) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testrollingFileWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Rolling file writer archive zip" - testLogFileName = getTestFileName(testName, "") - testConfig = ` - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testrollingFileWriter, _ = NewRollingFileWriterSize(testLogFileName, rollingArchiveZip, "log.zip", 100, 5, rollingNameModePostfix) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testrollingFileWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Rolling file writer archive zip with specified path" - testLogFileName = getTestFileName(testName, "") - testConfig = ` - - - - - ` - testExpected = new(configForParsing) - 
testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testrollingFileWriter, _ = NewRollingFileWriterSize(testLogFileName, rollingArchiveZip, "test.zip", 100, 5, rollingNameModePrefix) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testrollingFileWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Rolling file writer archive none" - testLogFileName = getTestFileName(testName, "") - testConfig = ` - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testrollingFileWriter, _ = NewRollingFileWriterSize(testLogFileName, rollingArchiveNone, "", 100, 5, rollingNameModePostfix) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testrollingFileWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Rolling file writer date" - testLogFileName = getTestFileName(testName, "") - testConfig = ` - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testrollingFileWriterTime, _ := NewRollingFileWriterTime(testLogFileName, rollingArchiveNone, "", 0, "2006-01-02T15:04:05Z07:00", rollingIntervalAny, rollingNameModePostfix) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testrollingFileWriterTime}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Buffered 
writer" - testLogFileName = getTestFileName(testName, "") - testConfig = ` - - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testrollingFileWriterTime, _ = NewRollingFileWriterTime(testLogFileName, rollingArchiveNone, "", 0, "2006-01-02T15:04:05Z07:00", rollingIntervalDaily, rollingNameModePostfix) - testbufferedWriter, _ := NewBufferedWriter(testrollingFileWriterTime, 100500, 100) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testbufferedWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Inner splitter output" - testLogFileName1 := getTestFileName(testName, "1") - testLogFileName2 := getTestFileName(testName, "2") - testLogFileName3 := getTestFileName(testName, "3") - testConfig = ` - - - - - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testfileWriter1, _ := NewFileWriter(testLogFileName2) - testfileWriter2, _ := NewFileWriter(testLogFileName3) - testInnerSplitter, _ := NewSplitDispatcher(DefaultFormatter, []interface{}{testfileWriter1, testfileWriter2}) - testfileWriter, _ = NewFileWriter(testLogFileName1) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testfileWriter, testInnerSplitter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - RegisterReceiver("custom-name-1", &customTestReceiver{}) - - testName = "Custom receiver 1" - testConfig = ` - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, 
CriticalLvl) - testExpected.Exceptions = nil - testCustomReceiver, _ := NewCustomReceiverDispatcher(DefaultFormatter, "custom-name-1", CustomReceiverInitArgs{ - XmlCustomAttrs: map[string]string{ - "test": "set", - }, - }) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testCustomReceiver}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Custom receiver 2" - testConfig = ` - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - crec := &customTestReceiver{} - cargs := CustomReceiverInitArgs{ - XmlCustomAttrs: map[string]string{ - "test": "set2", - }, - } - crec.AfterParse(cargs) - testCustomReceiver2, _ := NewCustomReceiverDispatcherByValue(DefaultFormatter, crec, "custom-name-2", cargs) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testCustomReceiver2}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - fnc := func(initArgs CustomReceiverInitArgs) (CustomReceiver, error) { - return &customTestReceiver{}, nil - } - cfg := CfgParseParams{ - CustomReceiverProducers: map[string]CustomReceiverProducer{ - "custom-name-2": CustomReceiverProducer(fnc), - }, - } - testExpected.Params = &cfg - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, &cfg}) - - RegisterReceiver("-", &customTestReceiver{}) - testName = "Custom receiver 3" - testConfig = ` - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - creccustom := &customTestReceiver{} - cargs3 := CustomReceiverInitArgs{ - XmlCustomAttrs: map[string]string{ - "test": "set3", - }, - } - 
creccustom.AfterParse(cargs3) - testCustomReceiver, _ = NewCustomReceiverDispatcherByValue(DefaultFormatter, creccustom, "-", cargs3) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testCustomReceiver}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Custom receivers with formats" - testConfig = ` - - - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testCustomReceivers := make([]*customReceiverDispatcher, 3) - for i := 0; i < 3; i++ { - testCustomReceivers[i], _ = NewCustomReceiverDispatcher(DefaultFormatter, "custom-name-1", CustomReceiverInitArgs{ - XmlCustomAttrs: map[string]string{ - "test": fmt.Sprintf("set%d", i+1), - }, - }) - } - - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testCustomReceivers[0], testCustomReceivers[1], testCustomReceivers[2]}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Format" - testLogFileName = getTestFileName(testName, "") - testConfig = ` - - - - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testfileWriter, _ = NewFileWriter(testLogFileName) - testFormat, _ := NewFormatter("%Level %Msg %File") - testHeadSplitter, _ = NewSplitDispatcher(testFormat, []interface{}{testfileWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Format2" - testLogFileName = 
getTestFileName(testName, "") - testLogFileName1 = getTestFileName(testName, "1") - testConfig = ` - - - - - - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testfileWriter, _ = NewFileWriter(testLogFileName) - testfileWriter1, _ = NewFileWriter(testLogFileName1) - testFormat1, _ := NewFormatter("%Level %Msg %File") - testFormat2, _ := NewFormatter("%l %Msg") - formattedWriter, _ := NewFormattedWriter(testfileWriter1, testFormat2) - testHeadSplitter, _ = NewSplitDispatcher(testFormat1, []interface{}{testfileWriter, formattedWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Minlevel = warn" - testConfig = `` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(WarnLvl, CriticalLvl) - testExpected.Exceptions = nil - testconsoleWriter, _ = NewConsoleWriter() - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testconsoleWriter}) - testExpected.LogType = asyncLooploggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Maxlevel = trace" - testConfig = `` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, TraceLvl) - testExpected.Exceptions = nil - testconsoleWriter, _ = NewConsoleWriter() - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testconsoleWriter}) - testExpected.LogType = asyncLooploggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Level between info and error" - testConfig = `` - testExpected = 
new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(InfoLvl, ErrorLvl) - testExpected.Exceptions = nil - testconsoleWriter, _ = NewConsoleWriter() - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testconsoleWriter}) - testExpected.LogType = asyncLooploggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Off with minlevel" - testConfig = `` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewOffConstraints() - testExpected.Exceptions = nil - testconsoleWriter, _ = NewConsoleWriter() - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testconsoleWriter}) - testExpected.LogType = asyncLooploggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Off with levels" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Levels list" - testConfig = `` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewListConstraints([]LogLevel{ - DebugLvl, InfoLvl, CriticalLvl}) - testExpected.Exceptions = nil - testconsoleWriter, _ = NewConsoleWriter() - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testconsoleWriter}) - testExpected.LogType = asyncLooploggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Errors #1" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #2" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #3" - testConfig = `` - 
parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #4" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #5" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #6" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #7" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #8" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #9" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #10" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #11" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #12" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #13" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #14" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #15" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #16" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #17" - testLogFileName = getTestFileName(testName, "") - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors 
#18" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #19" - testConfig = `` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Exceptions: restricting" - testConfig = - ` - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - listConstraint, _ := NewOffConstraints() - exception, _ := NewLogLevelException("Test*", "someFile.go", listConstraint) - testExpected.Exceptions = []*LogLevelException{exception} - testconsoleWriter, _ = NewConsoleWriter() - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testconsoleWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Exceptions: allowing #1" - testConfig = - ` - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewListConstraints([]LogLevel{ErrorLvl}) - minMaxConstraint, _ := NewMinMaxConstraints(TraceLvl, CriticalLvl) - exception, _ = NewLogLevelException("*", "testfile.go", minMaxConstraint) - testExpected.Exceptions = []*LogLevelException{exception} - testconsoleWriter, _ = NewConsoleWriter() - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testconsoleWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Exceptions: allowing #2" - testConfig = ` - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewOffConstraints() - minMaxConstraint, _ = NewMinMaxConstraints(WarnLvl, CriticalLvl) - exception, _ = NewLogLevelException("*", "testfile.go", minMaxConstraint) - 
testExpected.Exceptions = []*LogLevelException{exception} - testconsoleWriter, _ = NewConsoleWriter() - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testconsoleWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Predefined formats" - formatID := predefinedPrefix + "xml-debug-short" - testConfig = ` - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testconsoleWriter, _ = NewConsoleWriter() - testFormat, _ = predefinedFormats[formatID] - testHeadSplitter, _ = NewSplitDispatcher(testFormat, []interface{}{testconsoleWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Predefined formats redefine" - testLogFileName = getTestFileName(testName, "") - formatID = predefinedPrefix + "xml-debug-short" - testConfig = ` - - - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testfileWriter, _ = NewFileWriter(testLogFileName) - testFormat, _ = NewFormatter("%Level %Msg %File") - testHeadSplitter, _ = NewSplitDispatcher(testFormat, []interface{}{testfileWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Conn writer 1" - testConfig = ` - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testConnWriter := NewConnWriter("tcp", ":8888", 
false) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testConnWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Conn writer 2" - testConfig = ` - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testConnWriter = NewConnWriter("tcp", ":8888", true) - testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testConnWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - testName = "Errors #11" - testConfig = ` - - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #12" - testConfig = ` - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #13" - testConfig = ` - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #14" - testConfig = ` - - - - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #15" - testConfig = ` - - - - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #16" - testConfig = ` - - - - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #17" - testConfig = ` - - - - - - - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #18" - testConfig = ` - - - - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - 
testName = "Errors #19" - testConfig = ` - - - - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #20" - testConfig = ` - - - - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #21" - testConfig = ` - - - - - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #22" - testConfig = ` - - - - - - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #23" - testConfig = ` - - - - - - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #24" - testLogFileName = getTestFileName(testName, "") - testConfig = ` - - - - - - - - - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #25" - testLogFileName = getTestFileName(testName, "") - testConfig = ` - - - - - - - - - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Errors #26" - testConfig = ` - - - - - ` - parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) - - testName = "Buffered writer same formatid override" - testLogFileName = getTestFileName(testName, "") - testConfig = ` - - - - - - - - - - ` - testExpected = new(configForParsing) - testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl) - testExpected.Exceptions = nil - testrollingFileWriterTime, _ = NewRollingFileWriterTime(testLogFileName, rollingArchiveNone, "", 0, "2006-01-02T15:04:05Z07:00", rollingIntervalDaily, rollingNameModePrefix) - testbufferedWriter, _ = NewBufferedWriter(testrollingFileWriterTime, 100500, 100) - testFormat, _ = NewFormatter("%Level %Msg %File 123") - formattedWriter, _ = NewFormattedWriter(testbufferedWriter, testFormat) - testHeadSplitter, _ = 
NewSplitDispatcher(DefaultFormatter, []interface{}{formattedWriter}) - testExpected.LogType = syncloggerTypeFromString - testExpected.RootDispatcher = testHeadSplitter - parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) - - } - - return parserTests -} - -// Temporary solution: compare by string identity. Not the best solution in -// terms of performance, but a valid one in terms of comparison, because -// every seelog dispatcher/receiver must have a valid String() func -// that fully represents its internal parameters. -func configsAreEqual(conf1 *configForParsing, conf2 interface{}) bool { - if conf1 == nil { - return conf2 == nil - } - if conf2 == nil { - return conf1 == nil - } - - // configForParsing, ok := conf2 //.(*configForParsing) - // if !ok { - // return false - // } - - return fmt.Sprintf("%v", conf1) == fmt.Sprintf("%v", conf2) //configForParsing) -} - -func testLogFileFilter(fn string) bool { - return ".log" == filepath.Ext(fn) -} - -func cleanupAfterCfgTest(t *testing.T) { - toDel, err := getDirFilePaths(".", testLogFileFilter, true) - if nil != err { - t.Fatal("Cannot list files in test directory!") - } - - for _, p := range toDel { - err = tryRemoveFile(p) - if nil != err { - t.Errorf("cannot remove file %s in test directory: %s", p, err.Error()) - } - } -} - -func parseTest(test parserTest, t *testing.T) { - conf, err := configFromReaderWithConfig(strings.NewReader(test.config), test.parserConfig) - if /*err != nil &&*/ conf != nil && conf.RootDispatcher != nil { - defer func() { - if err = conf.RootDispatcher.Close(); err != nil { - t.Errorf("\n----ERROR while closing root dispatcher in %s test: %s", test.testName, err) - } - }() - } - - if (err != nil) != test.errorExpected { - t.Errorf("\n----ERROR in %s:\nConfig: %s\n* Expected error:%t. 
Got error: %t\n", - test.testName, test.config, test.errorExpected, (err != nil)) - if err != nil { - t.Logf("%s\n", err.Error()) - } - return - } - - if err == nil && !configsAreEqual(conf, test.expected) { - t.Errorf("\n----ERROR in %s:\nConfig: %s\n* Expected: %v. \n* Got: %v\n", - test.testName, test.config, test.expected, conf) - } -} - -func TestParser(t *testing.T) { - defer cleanupAfterCfgTest(t) - - for _, test := range getParserTests() { - parseTest(test, t) - } -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/common_closer.go b/Godeps/_workspace/src/github.com/cihub/seelog/common_closer.go deleted file mode 100644 index 1319c221..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/common_closer.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/common_constraints.go b/Godeps/_workspace/src/github.com/cihub/seelog/common_constraints.go deleted file mode 100644 index 7ec2fe5b..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/common_constraints.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "fmt" - "strings" -) - -// Represents constraints which form a general rule for log levels selection -type logLevelConstraints interface { - IsAllowed(level LogLevel) bool -} - -// A minMaxConstraints represents constraints which use minimal and maximal allowed log levels. -type minMaxConstraints struct { - min LogLevel - max LogLevel -} - -// NewMinMaxConstraints creates a new minMaxConstraints struct with the specified min and max levels. -func NewMinMaxConstraints(min LogLevel, max LogLevel) (*minMaxConstraints, error) { - if min > max { - return nil, fmt.Errorf("min level can't be greater than max. Got min: %d, max: %d", min, max) - } - if min < TraceLvl || min > CriticalLvl { - return nil, fmt.Errorf("min level can't be less than Trace or greater than Critical. Got min: %d", min) - } - if max < TraceLvl || max > CriticalLvl { - return nil, fmt.Errorf("max level can't be less than Trace or greater than Critical. Got max: %d", max) - } - - return &minMaxConstraints{min, max}, nil -} - -// IsAllowed returns true, if log level is in [min, max] range (inclusive). -func (minMaxConstr *minMaxConstraints) IsAllowed(level LogLevel) bool { - return level >= minMaxConstr.min && level <= minMaxConstr.max -} - -func (minMaxConstr *minMaxConstraints) String() string { - return fmt.Sprintf("Min: %s. 
Max: %s", minMaxConstr.min, minMaxConstr.max) -} - -//======================================================= - -// A listConstraints represents constraints which use allowed log levels list. -type listConstraints struct { - allowedLevels map[LogLevel]bool -} - -// NewListConstraints creates a new listConstraints struct with the specified allowed levels. -func NewListConstraints(allowList []LogLevel) (*listConstraints, error) { - if allowList == nil { - return nil, errors.New("list can't be nil") - } - - allowLevels, err := createMapFromList(allowList) - if err != nil { - return nil, err - } - err = validateOffLevel(allowLevels) - if err != nil { - return nil, err - } - - return &listConstraints{allowLevels}, nil -} - -func (listConstr *listConstraints) String() string { - allowedList := "List: " - - listLevel := make([]string, len(listConstr.allowedLevels)) - - var logLevel LogLevel - i := 0 - for logLevel = TraceLvl; logLevel <= Off; logLevel++ { - if listConstr.allowedLevels[logLevel] { - listLevel[i] = logLevel.String() - i++ - } - } - - allowedList += strings.Join(listLevel, ",") - - return allowedList -} - -func createMapFromList(allowedList []LogLevel) (map[LogLevel]bool, error) { - allowedLevels := make(map[LogLevel]bool, 0) - for _, level := range allowedList { - if level < TraceLvl || level > Off { - return nil, fmt.Errorf("level can't be less than Trace or greater than Critical. Got level: %d", level) - } - allowedLevels[level] = true - } - return allowedLevels, nil -} -func validateOffLevel(allowedLevels map[LogLevel]bool) error { - if _, ok := allowedLevels[Off]; ok && len(allowedLevels) > 1 { - return errors.New("logLevel Off cant be mixed with other levels") - } - - return nil -} - -// IsAllowed returns true, if log level is in allowed log levels list. -// If the list contains the only item 'common.Off' then IsAllowed will always return false for any input values. 
-func (listConstr *listConstraints) IsAllowed(level LogLevel) bool { - for l := range listConstr.allowedLevels { - if l == level && level != Off { - return true - } - } - - return false -} - -// AllowedLevels returns allowed levels configuration as a map. -func (listConstr *listConstraints) AllowedLevels() map[LogLevel]bool { - return listConstr.allowedLevels -} - -//======================================================= - -type offConstraints struct { -} - -func NewOffConstraints() (*offConstraints, error) { - return &offConstraints{}, nil -} - -func (offConstr *offConstraints) IsAllowed(level LogLevel) bool { - return false -} - -func (offConstr *offConstraints) String() string { - return "Off constraint" -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/common_constraints_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/common_constraints_test.go deleted file mode 100644 index bb9918e6..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/common_constraints_test.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "testing" -) - -func TestInvalidminMaxConstraints(t *testing.T) { - constr, err := NewMinMaxConstraints(CriticalLvl, WarnLvl) - - if err == nil || constr != nil { - t.Errorf("expected an error and a nil value for minmax constraints: min = %d, max = %d. Got: %v, %v", - CriticalLvl, WarnLvl, err, constr) - return - } -} - -func TestInvalidLogLevels(t *testing.T) { - var invalidMin uint8 = 123 - var invalidMax uint8 = 124 - minMaxConstr, errMinMax := NewMinMaxConstraints(LogLevel(invalidMin), LogLevel(invalidMax)) - - if errMinMax == nil || minMaxConstr != nil { - t.Errorf("expected an error and a nil value for minmax constraints: min = %d, max = %d. Got: %v, %v", - invalidMin, invalidMax, errMinMax, minMaxConstr) - return - } - - invalidList := []LogLevel{145} - - listConstr, errList := NewListConstraints(invalidList) - - if errList == nil || listConstr != nil { - t.Errorf("expected an error and a nil value for constraints list: %v. 
Got: %v, %v", - invalidList, errList, listConstr) - return - } -} - -func TestlistConstraintsWithDuplicates(t *testing.T) { - duplicateList := []LogLevel{TraceLvl, DebugLvl, InfoLvl, - WarnLvl, ErrorLvl, CriticalLvl, CriticalLvl, CriticalLvl} - - listConstr, errList := NewListConstraints(duplicateList) - - if errList != nil || listConstr == nil { - t.Errorf("expected a valid constraints list struct for: %v, got error: %v, value: %v", - duplicateList, errList, listConstr) - return - } - - listLevels := listConstr.AllowedLevels() - - if listLevels == nil { - t.Fatalf("listConstr.AllowedLevels() == nil") - return - } - - if len(listLevels) != 6 { - t.Errorf("expected: listConstr.AllowedLevels() length == 6. Got: %d", len(listLevels)) - return - } -} - -func TestlistConstraintsWithOffInList(t *testing.T) { - offList := []LogLevel{TraceLvl, DebugLvl, Off} - - listConstr, errList := NewListConstraints(offList) - - if errList == nil || listConstr != nil { - t.Errorf("expected an error and a nil value for constraints list with 'Off': %v. Got: %v, %v", - offList, errList, listConstr) - return - } -} - -type logLevelTestCase struct { - level LogLevel - allowed bool -} - -var minMaxTests = []logLevelTestCase{ - {TraceLvl, false}, - {DebugLvl, false}, - {InfoLvl, true}, - {WarnLvl, true}, - {ErrorLvl, false}, - {CriticalLvl, false}, - {123, false}, - {6, false}, -} - -func TestValidminMaxConstraints(t *testing.T) { - - constr, err := NewMinMaxConstraints(InfoLvl, WarnLvl) - - if err != nil || constr == nil { - t.Errorf("expected a valid constraints struct for minmax constraints: min = %d, max = %d. Got: %v, %v", - InfoLvl, WarnLvl, err, constr) - return - } - - for _, minMaxTest := range minMaxTests { - allowed := constr.IsAllowed(minMaxTest.level) - if allowed != minMaxTest.allowed { - t.Errorf("expected IsAllowed() = %t for level = %d. 
Got: %t", - minMaxTest.allowed, minMaxTest.level, allowed) - return - } - } -} - -var listTests = []logLevelTestCase{ - {TraceLvl, true}, - {DebugLvl, false}, - {InfoLvl, true}, - {WarnLvl, true}, - {ErrorLvl, false}, - {CriticalLvl, true}, - {123, false}, - {6, false}, -} - -func TestValidlistConstraints(t *testing.T) { - validList := []LogLevel{TraceLvl, InfoLvl, WarnLvl, CriticalLvl} - constr, err := NewListConstraints(validList) - - if err != nil || constr == nil { - t.Errorf("expected a valid constraints list struct for: %v. Got error: %v, value: %v", - validList, err, constr) - return - } - - for _, minMaxTest := range listTests { - allowed := constr.IsAllowed(minMaxTest.level) - if allowed != minMaxTest.allowed { - t.Errorf("expected IsAllowed() = %t for level = %d. Got: %t", - minMaxTest.allowed, minMaxTest.level, allowed) - return - } - } -} - -var offTests = []logLevelTestCase{ - {TraceLvl, false}, - {DebugLvl, false}, - {InfoLvl, false}, - {WarnLvl, false}, - {ErrorLvl, false}, - {CriticalLvl, false}, - {123, false}, - {6, false}, -} - -func TestValidListoffConstraints(t *testing.T) { - validList := []LogLevel{Off} - constr, err := NewListConstraints(validList) - - if err != nil || constr == nil { - t.Errorf("expected a valid constraints list struct for: %v. Got error: %v, value: %v", - validList, err, constr) - return - } - - for _, minMaxTest := range offTests { - allowed := constr.IsAllowed(minMaxTest.level) - if allowed != minMaxTest.allowed { - t.Errorf("expected IsAllowed() = %t for level = %d. Got: %t", - minMaxTest.allowed, minMaxTest.level, allowed) - return - } - } -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/common_context.go b/Godeps/_workspace/src/github.com/cihub/seelog/common_context.go deleted file mode 100644 index 04bc2235..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/common_context.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - "time" -) - -var workingDir = "/" - -func init() { - wd, err := os.Getwd() - if err == nil { - workingDir = filepath.ToSlash(wd) + "/" - } -} - -// Represents runtime caller context. -type LogContextInterface interface { - // Caller's function name. - Func() string - // Caller's line number. - Line() int - // Caller's file short path (in slashed form). - ShortPath() string - // Caller's file full path (in slashed form). - FullPath() string - // Caller's file name (without path). - FileName() string - // True if the context is correct and may be used. 
- // If false, then an error in context evaluation occurred and - // all its other data may be corrupted. - IsValid() bool - // Time when log function was called. - CallTime() time.Time - // Custom context that can be set by calling logger.SetContext - CustomContext() interface{} -} - -// Returns context of the caller -func currentContext(custom interface{}) (LogContextInterface, error) { - return specifyContext(1, custom) -} - -func extractCallerInfo(skip int) (fullPath string, shortPath string, funcName string, line int, err error) { - pc, fp, ln, ok := runtime.Caller(skip) - if !ok { - err = fmt.Errorf("error during runtime.Caller") - return - } - line = ln - fullPath = fp - if strings.HasPrefix(fp, workingDir) { - shortPath = fp[len(workingDir):] - } else { - shortPath = fp - } - funcName = runtime.FuncForPC(pc).Name() - if strings.HasPrefix(funcName, workingDir) { - funcName = funcName[len(workingDir):] - } - return -} - -// Returns context of the function with placed "skip" stack frames of the caller -// If skip == 0 then behaves like currentContext -// Context is returned in any situation, even if error occurs. But, if an error -// occurs, the returned context is an error context, which contains no paths -// or names, but states that they can't be extracted. -func specifyContext(skip int, custom interface{}) (LogContextInterface, error) { - callTime := time.Now() - if skip < 0 { - err := fmt.Errorf("can not skip negative stack frames") - return &errorContext{callTime, err}, err - } - fullPath, shortPath, funcName, line, err := extractCallerInfo(skip + 2) - if err != nil { - return &errorContext{callTime, err}, err - } - _, fileName := filepath.Split(fullPath) - return &logContext{funcName, line, shortPath, fullPath, fileName, callTime, custom}, nil -} - -// Represents a normal runtime caller context. 
-type logContext struct { - funcName string - line int - shortPath string - fullPath string - fileName string - callTime time.Time - custom interface{} -} - -func (context *logContext) IsValid() bool { - return true -} - -func (context *logContext) Func() string { - return context.funcName -} - -func (context *logContext) Line() int { - return context.line -} - -func (context *logContext) ShortPath() string { - return context.shortPath -} - -func (context *logContext) FullPath() string { - return context.fullPath -} - -func (context *logContext) FileName() string { - return context.fileName -} - -func (context *logContext) CallTime() time.Time { - return context.callTime -} - -func (context *logContext) CustomContext() interface{} { - return context.custom -} - -// Represents an error context -type errorContext struct { - errorTime time.Time - err error -} - -func (errContext *errorContext) getErrorText(prefix string) string { - return fmt.Sprintf("%s() error: %s", prefix, errContext.err) -} - -func (errContext *errorContext) IsValid() bool { - return false -} - -func (errContext *errorContext) Line() int { - return -1 -} - -func (errContext *errorContext) Func() string { - return errContext.getErrorText("Func") -} - -func (errContext *errorContext) ShortPath() string { - return errContext.getErrorText("ShortPath") -} - -func (errContext *errorContext) FullPath() string { - return errContext.getErrorText("FullPath") -} - -func (errContext *errorContext) FileName() string { - return errContext.getErrorText("FileName") -} - -func (errContext *errorContext) CallTime() time.Time { - return errContext.errorTime -} - -func (errContext *errorContext) CustomContext() interface{} { - return nil -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/common_context_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/common_context_test.go deleted file mode 100644 index bd1e47ab..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/common_context_test.go +++ 
/dev/null @@ -1,127 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "os" - "path/filepath" - "strings" - "testing" -) - -const ( - testShortPath = "common_context_test.go" -) - -var ( - commonPrefix string - testFullPath string -) - -func init() { - // Here we remove the hardcoding of the package name which - // may break forks and some CI environments such as jenkins. 
- _, _, funcName, _, _ := extractCallerInfo(1) - preIndex := strings.Index(funcName, "init·") - if preIndex == -1 { - preIndex = strings.Index(funcName, "init") - } - commonPrefix = funcName[:preIndex] - wd, err := os.Getwd() - if err == nil { - // Transform the file path into a slashed form: - // This is the proper platform-neutral way. - testFullPath = filepath.ToSlash(filepath.Join(wd, testShortPath)) - } -} - -func TestContext(t *testing.T) { - context, err := currentContext(nil) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - if context == nil { - t.Fatalf("unexpected error: context is nil") - } - if fn, funcName := context.Func(), commonPrefix+"TestContext"; fn != funcName { - // Account for a case when the func full path is longer than commonPrefix but includes it. - if !strings.HasSuffix(fn, funcName) { - t.Errorf("expected context.Func == %s ; got %s", funcName, context.Func()) - } - } - if context.ShortPath() != testShortPath { - t.Errorf("expected context.ShortPath == %s ; got %s", testShortPath, context.ShortPath()) - } - if len(testFullPath) == 0 { - t.Fatal("working directory seems invalid") - } - if context.FullPath() != testFullPath { - t.Errorf("expected context.FullPath == %s ; got %s", testFullPath, context.FullPath()) - } -} - -func innerContext() (context LogContextInterface, err error) { - return currentContext(nil) -} - -func TestInnerContext(t *testing.T) { - context, err := innerContext() - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - if context == nil { - t.Fatalf("unexpected error: context is nil") - } - if fn, funcName := context.Func(), commonPrefix+"innerContext"; fn != funcName { - // Account for a case when the func full path is longer than commonPrefix but includes it. 
- if !strings.HasSuffix(fn, funcName) { - t.Errorf("expected context.Func == %s ; got %s", funcName, context.Func()) - } - } - if context.ShortPath() != testShortPath { - t.Errorf("expected context.ShortPath == %s ; got %s", testShortPath, context.ShortPath()) - } - if len(testFullPath) == 0 { - t.Fatal("working directory seems invalid") - } - if context.FullPath() != testFullPath { - t.Errorf("expected context.FullPath == %s ; got %s", testFullPath, context.FullPath()) - } -} - -type testContext struct { - field string -} - -func TestCustomContext(t *testing.T) { - expected := "testStr" - context, err := currentContext(&testContext{expected}) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - if st, _ := context.CustomContext().(*testContext); st.field != expected { - t.Errorf("expected context.CustomContext == %s ; got %s", expected, st.field) - } -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/common_exception.go b/Godeps/_workspace/src/github.com/cihub/seelog/common_exception.go deleted file mode 100644 index 9acc2750..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/common_exception.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "fmt" - "regexp" - "strings" -) - -// Used in rules creation to validate input file and func filters -var ( - fileFormatValidator = regexp.MustCompile(`[a-zA-Z0-9\\/ _\*\.]*`) - funcFormatValidator = regexp.MustCompile(`[a-zA-Z0-9_\*\.]*`) -) - -// LogLevelException represents an exceptional case used when you need some specific files or funcs to -// override general constraints and to use their own. -type LogLevelException struct { - funcPatternParts []string - filePatternParts []string - - funcPattern string - filePattern string - - constraints logLevelConstraints -} - -// NewLogLevelException creates a new exception. 
-func NewLogLevelException(funcPattern string, filePattern string, constraints logLevelConstraints) (*LogLevelException, error) { - if constraints == nil { - return nil, errors.New("constraints can not be nil") - } - - exception := new(LogLevelException) - - err := exception.initFuncPatternParts(funcPattern) - if err != nil { - return nil, err - } - exception.funcPattern = strings.Join(exception.funcPatternParts, "") - - err = exception.initFilePatternParts(filePattern) - if err != nil { - return nil, err - } - exception.filePattern = strings.Join(exception.filePatternParts, "") - - exception.constraints = constraints - - return exception, nil -} - -// MatchesContext returns true if context matches the patterns of this LogLevelException -func (logLevelEx *LogLevelException) MatchesContext(context LogContextInterface) bool { - return logLevelEx.match(context.Func(), context.FullPath()) -} - -// IsAllowed returns true if log level is allowed according to the constraints of this LogLevelException -func (logLevelEx *LogLevelException) IsAllowed(level LogLevel) bool { - return logLevelEx.constraints.IsAllowed(level) -} - -// FuncPattern returns the function pattern of a exception -func (logLevelEx *LogLevelException) FuncPattern() string { - return logLevelEx.funcPattern -} - -// FuncPattern returns the file pattern of a exception -func (logLevelEx *LogLevelException) FilePattern() string { - return logLevelEx.filePattern -} - -// initFuncPatternParts checks whether the func filter has a correct format and splits funcPattern on parts -func (logLevelEx *LogLevelException) initFuncPatternParts(funcPattern string) (err error) { - - if funcFormatValidator.FindString(funcPattern) != funcPattern { - return errors.New("func path \"" + funcPattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 _ * . 
allowed)") - } - - logLevelEx.funcPatternParts = splitPattern(funcPattern) - return nil -} - -// Checks whether the file filter has a correct format and splits file patterns using splitPattern. -func (logLevelEx *LogLevelException) initFilePatternParts(filePattern string) (err error) { - - if fileFormatValidator.FindString(filePattern) != filePattern { - return errors.New("file path \"" + filePattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 \\ / _ * . allowed)") - } - - logLevelEx.filePatternParts = splitPattern(filePattern) - return err -} - -func (logLevelEx *LogLevelException) match(funcPath string, filePath string) bool { - if !stringMatchesPattern(logLevelEx.funcPatternParts, funcPath) { - return false - } - return stringMatchesPattern(logLevelEx.filePatternParts, filePath) -} - -func (logLevelEx *LogLevelException) String() string { - str := fmt.Sprintf("Func: %s File: %s", logLevelEx.funcPattern, logLevelEx.filePattern) - - if logLevelEx.constraints != nil { - str += fmt.Sprintf("Constr: %s", logLevelEx.constraints) - } else { - str += "nil" - } - - return str -} - -// splitPattern splits pattern into strings and asterisks. Example: "ab*cde**f" -> ["ab", "*", "cde", "*", "f"] -func splitPattern(pattern string) []string { - var patternParts []string - var lastChar rune - for _, char := range pattern { - if char == '*' { - if lastChar != '*' { - patternParts = append(patternParts, "*") - } - } else { - if len(patternParts) != 0 && lastChar != '*' { - patternParts[len(patternParts)-1] += string(char) - } else { - patternParts = append(patternParts, string(char)) - } - } - lastChar = char - } - - return patternParts -} - -// stringMatchesPattern check whether testString matches pattern with asterisks. -// Standard regexp functionality is not used here because of performance issues. 
-func stringMatchesPattern(patternparts []string, testString string) bool { - if len(patternparts) == 0 { - return len(testString) == 0 - } - - part := patternparts[0] - if part != "*" { - index := strings.Index(testString, part) - if index == 0 { - return stringMatchesPattern(patternparts[1:], testString[len(part):]) - } - } else { - if len(patternparts) == 1 { - return true - } - - newTestString := testString - part = patternparts[1] - for { - index := strings.Index(newTestString, part) - if index == -1 { - break - } - - newTestString = newTestString[index+len(part):] - result := stringMatchesPattern(patternparts[2:], newTestString) - if result { - return true - } - } - } - return false -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/common_exception_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/common_exception_test.go deleted file mode 100644 index d98c2803..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/common_exception_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "testing" -) - -type exceptionTestCase struct { - funcPattern string - filePattern string - funcName string - fileName string - match bool -} - -var exceptionTestCases = []exceptionTestCase{ - {"*", "*", "func", "file", true}, - {"func*", "*", "func", "file", true}, - {"*func", "*", "func", "file", true}, - {"*func", "*", "1func", "file", true}, - {"func*", "*", "func1", "file", true}, - {"fu*nc", "*", "func", "file", true}, - {"fu*nc", "*", "fu1nc", "file", true}, - {"fu*nc", "*", "func1nc", "file", true}, - {"*fu*nc*", "*", "somefuntonc", "file", true}, - {"fu*nc", "*", "f1nc", "file", false}, - {"func*", "*", "fun", "file", false}, - {"fu*nc", "*", "func1n", "file", false}, - {"**f**u**n**c**", "*", "func1n", "file", true}, -} - -func TestMatchingCorrectness(t *testing.T) { - constraints, err := NewListConstraints([]LogLevel{TraceLvl}) - if err != nil { - t.Error(err) - return - } - - for _, testCase := range exceptionTestCases { - rule, ruleError := NewLogLevelException(testCase.funcPattern, testCase.filePattern, constraints) - if ruleError != nil { - t.Fatalf("Unexpected error on rule creation: [ %v, %v ]. %v", - testCase.funcPattern, testCase.filePattern, ruleError) - } - - match := rule.match(testCase.funcName, testCase.fileName) - if match != testCase.match { - t.Errorf("incorrect matching for [ %v, %v ] [ %v, %v ] Expected: %t. 
Got: %t", - testCase.funcPattern, testCase.filePattern, testCase.funcName, testCase.fileName, testCase.match, match) - } - } -} - -func TestAsterisksReducing(t *testing.T) { - constraints, err := NewListConstraints([]LogLevel{TraceLvl}) - if err != nil { - t.Error(err) - return - } - - rule, err := NewLogLevelException("***func**", "fi*****le", constraints) - if err != nil { - t.Error(err) - return - } - expectFunc := "*func*" - if rule.FuncPattern() != expectFunc { - t.Errorf("asterisks must be reduced. Expect:%v, Got:%v", expectFunc, rule.FuncPattern()) - } - - expectFile := "fi*le" - if rule.FilePattern() != expectFile { - t.Errorf("asterisks must be reduced. Expect:%v, Got:%v", expectFile, rule.FilePattern()) - } -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/common_flusher.go b/Godeps/_workspace/src/github.com/cihub/seelog/common_flusher.go deleted file mode 100644 index 0ef077c8..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/common_flusher.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -// flusherInterface represents all objects that have to do cleanup -// at certain moments of time (e.g. before app shutdown to avoid data loss) -type flusherInterface interface { - Flush() -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/common_loglevel.go b/Godeps/_workspace/src/github.com/cihub/seelog/common_loglevel.go deleted file mode 100644 index d54ecf27..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/common_loglevel.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -// Log level type -type LogLevel uint8 - -// Log levels -const ( - TraceLvl = iota - DebugLvl - InfoLvl - WarnLvl - ErrorLvl - CriticalLvl - Off -) - -// Log level string representations (used in configuration files) -const ( - TraceStr = "trace" - DebugStr = "debug" - InfoStr = "info" - WarnStr = "warn" - ErrorStr = "error" - CriticalStr = "critical" - OffStr = "off" -) - -var levelToStringRepresentations = map[LogLevel]string{ - TraceLvl: TraceStr, - DebugLvl: DebugStr, - InfoLvl: InfoStr, - WarnLvl: WarnStr, - ErrorLvl: ErrorStr, - CriticalLvl: CriticalStr, - Off: OffStr, -} - -// LogLevelFromString parses a string and returns a corresponding log level, if sucessfull. -func LogLevelFromString(levelStr string) (level LogLevel, found bool) { - for lvl, lvlStr := range levelToStringRepresentations { - if lvlStr == levelStr { - return lvl, true - } - } - - return 0, false -} - -// LogLevelToString returns seelog string representation for a specified level. Returns "" for invalid log levels. 
-func (level LogLevel) String() string { - levelStr, ok := levelToStringRepresentations[level] - if ok { - return levelStr - } - - return "" -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_custom.go b/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_custom.go deleted file mode 100644 index 383a7705..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_custom.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) 2013 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -var registeredReceivers = make(map[string]reflect.Type) - -// RegisterReceiver records a custom receiver type, identified by a value -// of that type (second argument), under the specified name. Registered -// names can be used in the "name" attribute of config items. -// -// RegisterReceiver takes the type of the receiver argument, without taking -// the value into the account. So do NOT enter any data to the second argument -// and only call it like: -// RegisterReceiver("somename", &MyReceiverType{}) -// -// After that, when a '' config tag with this name is used, -// a receiver of the specified type would be instantiated. Check -// CustomReceiver comments for interface details. -// -// NOTE 1: RegisterReceiver fails if you attempt to register different types -// with the same name. -// -// NOTE 2: RegisterReceiver registers those receivers that must be used in -// the configuration files ( items). Basically it is just the way -// you tell seelog config parser what should it do when it meets a -// tag with a specific name and data attributes. -// -// But If you are only using seelog as a proxy to an already instantiated -// CustomReceiver (via LoggerFromCustomReceiver func), you should not call RegisterReceiver. 
-func RegisterReceiver(name string, receiver CustomReceiver) { - newType := reflect.TypeOf(reflect.ValueOf(receiver).Elem().Interface()) - if t, ok := registeredReceivers[name]; ok && t != newType { - panic(fmt.Sprintf("duplicate types for %s: %s != %s", name, t, newType)) - } - registeredReceivers[name] = newType -} - -func customReceiverByName(name string) (creceiver CustomReceiver, err error) { - rt, ok := registeredReceivers[name] - if !ok { - return nil, fmt.Errorf("custom receiver name not registered: '%s'", name) - } - v, ok := reflect.New(rt).Interface().(CustomReceiver) - if !ok { - return nil, fmt.Errorf("cannot instantiate receiver with name='%s'", name) - } - return v, nil -} - -// CustomReceiverInitArgs represent arguments passed to the CustomReceiver.Init -// func when custom receiver is being initialized. -type CustomReceiverInitArgs struct { - // XmlCustomAttrs represent '' xml config item attributes that - // start with "data-". Map keys will be the attribute names without the "data-". - // Map values will the those attribute values. - // - // E.g. if you have a '' - // you will get map with 2 key-value pairs: "attr1"->"a1", "attr2"->"a2" - // - // Note that in custom items you can only use allowed attributes, like "name" and - // your custom attributes, starting with "data-". Any other will lead to a - // parsing error. - XmlCustomAttrs map[string]string -} - -// CustomReceiver is the interface that external custom seelog message receivers -// must implement in order to be able to process seelog messages. Those receivers -// are set in the xml config file using the tag. Check receivers reference -// wiki section on that. -// -// Use seelog.RegisterReceiver on the receiver type before using it. -type CustomReceiver interface { - // ReceiveMessage is called when the custom receiver gets seelog message from - // a parent dispatcher. 
- // - // Message, level and context args represent all data that was included in the seelog - // message at the time it was logged. - // - // The formatting is already applied to the message and depends on the config - // like with any other receiver. - // - // If you would like to inform seelog of an error that happened during the handling of - // the message, return a non-nil error. This way you'll end up seeing your error like - // any other internal seelog error. - ReceiveMessage(message string, level LogLevel, context LogContextInterface) error - - // AfterParse is called immediately after your custom receiver is instantiated by - // the xml config parser. So, if you need to do any startup logic after config parsing, - // like opening file or allocating any resources after the receiver is instantiated, do it here. - // - // If this func returns a non-nil error, then the loading procedure will fail. E.g. - // if you are loading a seelog xml config, the parser would not finish the loading - // procedure and inform about an error like with any other config error. - // - // If your custom logger needs some configuration, you can use custom attributes in - // your config. Check CustomReceiverInitArgs.XmlCustomAttrs comments. - // - // IMPORTANT: This func is NOT called when the LoggerFromCustomReceiver func is used - // to create seelog proxy logger using the custom receiver. This func is only called when - // receiver is instantiated from a config. - AfterParse(initArgs CustomReceiverInitArgs) error - - // Flush is called when the custom receiver gets a 'flush' directive from a - // parent receiver. If custom receiver implements some kind of buffering or - // queing, then the appropriate reaction on a flush message is synchronous - // flushing of all those queues/buffers. If custom receiver doesn't have - // such mechanisms, then flush implementation may be left empty. 
- Flush() - - // Close is called when the custom receiver gets a 'close' directive from a - // parent receiver. This happens when a top-level seelog dispatcher is sending - // 'close' to all child nodes and it means that current seelog logger is being closed. - // If you need to do any cleanup after your custom receiver is done, you should do - // it here. - Close() error -} - -type customReceiverDispatcher struct { - formatter *formatter - innerReceiver CustomReceiver - customReceiverName string - usedArgs CustomReceiverInitArgs -} - -// NewCustomReceiverDispatcher creates a customReceiverDispatcher which dispatches data to a specific receiver created -// using a tag in the config file. -func NewCustomReceiverDispatcher(formatter *formatter, customReceiverName string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) { - if formatter == nil { - return nil, errors.New("formatter cannot be nil") - } - if len(customReceiverName) == 0 { - return nil, errors.New("custom receiver name cannot be empty") - } - - creceiver, err := customReceiverByName(customReceiverName) - if err != nil { - return nil, err - } - err = creceiver.AfterParse(cArgs) - if err != nil { - return nil, err - } - disp := &customReceiverDispatcher{formatter, creceiver, customReceiverName, cArgs} - - return disp, nil -} - -// NewCustomReceiverDispatcherByValue is basically the same as NewCustomReceiverDispatcher, but using -// a specific CustomReceiver value instead of instantiating a new one by type. -func NewCustomReceiverDispatcherByValue(formatter *formatter, customReceiver CustomReceiver, name string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) { - if formatter == nil { - return nil, errors.New("formatter cannot be nil") - } - if customReceiver == nil { - return nil, errors.New("customReceiver cannot be nil") - } - disp := &customReceiverDispatcher{formatter, customReceiver, name, cArgs} - - return disp, nil -} - -// CustomReceiver implementation. 
Check CustomReceiver comments. -func (disp *customReceiverDispatcher) Dispatch( - message string, - level LogLevel, - context LogContextInterface, - errorFunc func(err error)) { - - defer func() { - if err := recover(); err != nil { - errorFunc(fmt.Errorf("panic in custom receiver '%s'.Dispatch: %s", reflect.TypeOf(disp.innerReceiver), err)) - } - }() - - err := disp.innerReceiver.ReceiveMessage(disp.formatter.Format(message, level, context), level, context) - if err != nil { - errorFunc(err) - } -} - -// CustomReceiver implementation. Check CustomReceiver comments. -func (disp *customReceiverDispatcher) Flush() { - disp.innerReceiver.Flush() -} - -// CustomReceiver implementation. Check CustomReceiver comments. -func (disp *customReceiverDispatcher) Close() error { - disp.innerReceiver.Flush() - - err := disp.innerReceiver.Close() - if err != nil { - return err - } - - return nil -} - -func (disp *customReceiverDispatcher) String() string { - datas := "" - skeys := make([]string, 0, len(disp.usedArgs.XmlCustomAttrs)) - for i := range disp.usedArgs.XmlCustomAttrs { - skeys = append(skeys, i) - } - sort.Strings(skeys) - for _, key := range skeys { - datas += fmt.Sprintf("<%s, %s> ", key, disp.usedArgs.XmlCustomAttrs[key]) - } - - str := fmt.Sprintf("Custom receiver %s [fmt='%s'],[data='%s'],[inner='%s']\n", - disp.customReceiverName, disp.formatter.String(), datas, disp.innerReceiver) - - return str -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_customdispatcher_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_customdispatcher_test.go deleted file mode 100644 index 23f631a2..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_customdispatcher_test.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) 2013 - Cloud Instruments Co., Ltd. -// -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "testing" -) - -type testCustomDispatcherMessageReceiver struct { - customTestReceiver -} - -func TestCustomDispatcher_Message(t *testing.T) { - recName := "TestCustomDispatcher_Message" - RegisterReceiver(recName, &testCustomDispatcherMessageReceiver{}) - - customDispatcher, err := NewCustomReceiverDispatcher(onlyMessageFormatForTest, recName, CustomReceiverInitArgs{ - XmlCustomAttrs: map[string]string{ - "test": "testdata", - }, - }) - if err != nil { - t.Error(err) - return - } - - context, err := currentContext(nil) - if err != nil { - t.Error(err) - return - } - - bytes := []byte("Hello") - customDispatcher.Dispatch(string(bytes), TraceLvl, context, func(err error) {}) - - cout := customDispatcher.innerReceiver.(*testCustomDispatcherMessageReceiver).customTestReceiver.co - if cout.initCalled != true { - t.Error("Init not called") - return - } - if cout.dataPassed != "testdata" { - t.Errorf("wrong data passed: '%s'", cout.dataPassed) - return - } - if cout.messageOutput != string(bytes) { - t.Errorf("wrong message output: '%s'", cout.messageOutput) - return - } - if cout.levelOutput != TraceLvl { - t.Errorf("wrong log level: '%s'", cout.levelOutput) - return - } - if cout.flushed { - t.Error("Flush was not expected") - return - } - if cout.closed { - t.Error("Closing was not expected") - return - } -} - -type testCustomDispatcherFlushReceiver struct { - customTestReceiver -} - -func TestCustomDispatcher_Flush(t *testing.T) { - recName := "TestCustomDispatcher_Flush" - RegisterReceiver(recName, &testCustomDispatcherFlushReceiver{}) - - customDispatcher, err := NewCustomReceiverDispatcher(onlyMessageFormatForTest, recName, CustomReceiverInitArgs{ - XmlCustomAttrs: map[string]string{ - "test": "testdata", - }, - }) - if err != nil { - t.Error(err) - return - } - - customDispatcher.Flush() - - cout := customDispatcher.innerReceiver.(*testCustomDispatcherFlushReceiver).customTestReceiver.co - if cout.initCalled != true { - 
t.Error("Init not called") - return - } - if cout.dataPassed != "testdata" { - t.Errorf("wrong data passed: '%s'", cout.dataPassed) - return - } - if cout.messageOutput != "" { - t.Errorf("wrong message output: '%s'", cout.messageOutput) - return - } - if cout.levelOutput != TraceLvl { - t.Errorf("wrong log level: '%s'", cout.levelOutput) - return - } - if !cout.flushed { - t.Error("Flush was expected") - return - } - if cout.closed { - t.Error("Closing was not expected") - return - } -} - -type testCustomDispatcherCloseReceiver struct { - customTestReceiver -} - -func TestCustomDispatcher_Close(t *testing.T) { - recName := "TestCustomDispatcher_Close" - RegisterReceiver(recName, &testCustomDispatcherCloseReceiver{}) - - customDispatcher, err := NewCustomReceiverDispatcher(onlyMessageFormatForTest, recName, CustomReceiverInitArgs{ - XmlCustomAttrs: map[string]string{ - "test": "testdata", - }, - }) - if err != nil { - t.Error(err) - return - } - - customDispatcher.Close() - - cout := customDispatcher.innerReceiver.(*testCustomDispatcherCloseReceiver).customTestReceiver.co - if cout.initCalled != true { - t.Error("Init not called") - return - } - if cout.dataPassed != "testdata" { - t.Errorf("wrong data passed: '%s'", cout.dataPassed) - return - } - if cout.messageOutput != "" { - t.Errorf("wrong message output: '%s'", cout.messageOutput) - return - } - if cout.levelOutput != TraceLvl { - t.Errorf("wrong log level: '%s'", cout.levelOutput) - return - } - if !cout.flushed { - t.Error("Flush was expected") - return - } - if !cout.closed { - t.Error("Closing was expected") - return - } -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_dispatcher.go b/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_dispatcher.go deleted file mode 100644 index 2bd3b4a4..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_dispatcher.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. 
-// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "fmt" - "io" -) - -// A dispatcherInterface is used to dispatch message to all underlying receivers. -// Dispatch logic depends on given context and log level. Any errors are reported using errorFunc. 
-// Also, as underlying receivers may have a state, dispatcher has a ShuttingDown method which performs -// an immediate cleanup of all data that is stored in the receivers -type dispatcherInterface interface { - flusherInterface - io.Closer - Dispatch(message string, level LogLevel, context LogContextInterface, errorFunc func(err error)) -} - -type dispatcher struct { - formatter *formatter - writers []*formattedWriter - dispatchers []dispatcherInterface -} - -// Creates a dispatcher which dispatches data to a list of receivers. -// Each receiver should be either a Dispatcher or io.Writer, otherwise an error will be returned -func createDispatcher(formatter *formatter, receivers []interface{}) (*dispatcher, error) { - if formatter == nil { - return nil, errors.New("formatter cannot be nil") - } - if receivers == nil || len(receivers) == 0 { - return nil, errors.New("receivers cannot be nil or empty") - } - - disp := &dispatcher{formatter, make([]*formattedWriter, 0), make([]dispatcherInterface, 0)} - for _, receiver := range receivers { - writer, ok := receiver.(*formattedWriter) - if ok { - disp.writers = append(disp.writers, writer) - continue - } - - ioWriter, ok := receiver.(io.Writer) - if ok { - writer, err := NewFormattedWriter(ioWriter, disp.formatter) - if err != nil { - return nil, err - } - disp.writers = append(disp.writers, writer) - continue - } - - dispInterface, ok := receiver.(dispatcherInterface) - if ok { - disp.dispatchers = append(disp.dispatchers, dispInterface) - continue - } - - return nil, errors.New("method can receive either io.Writer or dispatcherInterface") - } - - return disp, nil -} - -func (disp *dispatcher) Dispatch( - message string, - level LogLevel, - context LogContextInterface, - errorFunc func(err error)) { - - for _, writer := range disp.writers { - err := writer.Write(message, level, context) - if err != nil { - errorFunc(err) - } - } - - for _, dispInterface := range disp.dispatchers { - dispInterface.Dispatch(message, 
level, context, errorFunc) - } -} - -// Flush goes through all underlying writers which implement flusherInterface interface -// and closes them. Recursively performs the same action for underlying dispatchers -func (disp *dispatcher) Flush() { - for _, disp := range disp.Dispatchers() { - disp.Flush() - } - - for _, formatWriter := range disp.Writers() { - flusher, ok := formatWriter.Writer().(flusherInterface) - if ok { - flusher.Flush() - } - } -} - -// Close goes through all underlying writers which implement io.Closer interface -// and closes them. Recursively performs the same action for underlying dispatchers -// Before closing, writers are flushed to prevent loss of any buffered data, so -// a call to Flush() func before Close() is not necessary -func (disp *dispatcher) Close() error { - for _, disp := range disp.Dispatchers() { - disp.Flush() - err := disp.Close() - if err != nil { - return err - } - } - - for _, formatWriter := range disp.Writers() { - flusher, ok := formatWriter.Writer().(flusherInterface) - if ok { - flusher.Flush() - } - - closer, ok := formatWriter.Writer().(io.Closer) - if ok { - err := closer.Close() - if err != nil { - return err - } - } - } - - return nil -} - -func (disp *dispatcher) Writers() []*formattedWriter { - return disp.writers -} - -func (disp *dispatcher) Dispatchers() []dispatcherInterface { - return disp.dispatchers -} - -func (disp *dispatcher) String() string { - str := "formatter: " + disp.formatter.String() + "\n" - - str += " ->Dispatchers:" - - if len(disp.dispatchers) == 0 { - str += "none\n" - } else { - str += "\n" - - for _, disp := range disp.dispatchers { - str += fmt.Sprintf(" ->%s", disp) - } - } - - str += " ->Writers:" - - if len(disp.writers) == 0 { - str += "none\n" - } else { - str += "\n" - - for _, writer := range disp.writers { - str += fmt.Sprintf(" ->%s\n", writer) - } - } - - return str -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_filterdispatcher.go 
b/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_filterdispatcher.go deleted file mode 100644 index 9de8a722..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_filterdispatcher.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "fmt" -) - -// A filterDispatcher writes the given message to underlying receivers only if message log level -// is in the allowed list. -type filterDispatcher struct { - *dispatcher - allowList map[LogLevel]bool -} - -// NewFilterDispatcher creates a new filterDispatcher using a list of allowed levels. 
-func NewFilterDispatcher(formatter *formatter, receivers []interface{}, allowList ...LogLevel) (*filterDispatcher, error) { - disp, err := createDispatcher(formatter, receivers) - if err != nil { - return nil, err - } - - allows := make(map[LogLevel]bool) - for _, allowLevel := range allowList { - allows[allowLevel] = true - } - - return &filterDispatcher{disp, allows}, nil -} - -func (filter *filterDispatcher) Dispatch( - message string, - level LogLevel, - context LogContextInterface, - errorFunc func(err error)) { - isAllowed, ok := filter.allowList[level] - if ok && isAllowed { - filter.dispatcher.Dispatch(message, level, context, errorFunc) - } -} - -func (filter *filterDispatcher) String() string { - return fmt.Sprintf("filterDispatcher ->\n%s", filter.dispatcher) -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_filterdispatcher_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_filterdispatcher_test.go deleted file mode 100644 index c1894a76..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_filterdispatcher_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "testing" -) - -func TestfilterDispatcher_Pass(t *testing.T) { - writer, _ := newBytesVerifier(t) - filter, err := NewFilterDispatcher(onlyMessageFormatForTest, []interface{}{writer}, TraceLvl) - if err != nil { - t.Error(err) - return - } - - context, err := currentContext(nil) - if err != nil { - t.Error(err) - return - } - - bytes := []byte("Hello") - writer.ExpectBytes(bytes) - filter.Dispatch(string(bytes), TraceLvl, context, func(err error) {}) - writer.MustNotExpect() -} - -func TestfilterDispatcher_Deny(t *testing.T) { - writer, _ := newBytesVerifier(t) - filter, err := NewFilterDispatcher(DefaultFormatter, []interface{}{writer}) - if err != nil { - t.Error(err) - return - } - - context, err := currentContext(nil) - if err != nil { - t.Error(err) - return - } - - bytes := []byte("Hello") - filter.Dispatch(string(bytes), TraceLvl, context, func(err error) {}) -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_splitdispatcher.go b/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_splitdispatcher.go deleted file mode 100644 index 1d0fe7ea..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_splitdispatcher.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "fmt" -) - -// A splitDispatcher just writes the given message to underlying receivers. (Splits the message stream.) 
-type splitDispatcher struct { - *dispatcher -} - -func NewSplitDispatcher(formatter *formatter, receivers []interface{}) (*splitDispatcher, error) { - disp, err := createDispatcher(formatter, receivers) - if err != nil { - return nil, err - } - - return &splitDispatcher{disp}, nil -} - -func (splitter *splitDispatcher) String() string { - return fmt.Sprintf("splitDispatcher ->\n%s", splitter.dispatcher.String()) -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_splitdispatcher_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_splitdispatcher_test.go deleted file mode 100644 index fc4651c2..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/dispatch_splitdispatcher_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "fmt" - "testing" -) - -var onlyMessageFormatForTest *formatter - -func init() { - var err error - onlyMessageFormatForTest, err = NewFormatter("%Msg") - if err != nil { - fmt.Println("Can not create only message format: " + err.Error()) - } -} - -func TestsplitDispatcher(t *testing.T) { - writer1, _ := newBytesVerifier(t) - writer2, _ := newBytesVerifier(t) - spliter, err := NewSplitDispatcher(onlyMessageFormatForTest, []interface{}{writer1, writer2}) - if err != nil { - t.Error(err) - return - } - - context, err := currentContext(nil) - if err != nil { - t.Error(err) - return - } - - bytes := []byte("Hello") - - writer1.ExpectBytes(bytes) - writer2.ExpectBytes(bytes) - spliter.Dispatch(string(bytes), TraceLvl, context, func(err error) {}) - writer1.MustNotExpect() - writer2.MustNotExpect() -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/doc.go b/Godeps/_workspace/src/github.com/cihub/seelog/doc.go deleted file mode 100644 index 2734c9cb..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/doc.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (c) 2014 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. 
Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package seelog implements logging functionality with flexible dispatching, filtering, and formatting. - -Creation - -To create a logger, use one of the following constructors: - func LoggerFromConfigAsBytes - func LoggerFromConfigAsFile - func LoggerFromConfigAsString - func LoggerFromWriterWithMinLevel - func LoggerFromWriterWithMinLevelAndFormat - func LoggerFromCustomReceiver (check https://github.com/cihub/seelog/wiki/Custom-receivers) -Example: - import log "github.com/cihub/seelog" - - func main() { - logger, err := log.LoggerFromConfigAsFile("seelog.xml") - if err != nil { - panic(err) - } - defer logger.Flush() - ... use logger ... 
- } -The "defer" line is important because if you are using asynchronous logger behavior, without this line you may end up losing some -messages when you close your application because they are processed in another non-blocking goroutine. To avoid that you -explicitly defer flushing all messages before closing. - -Usage - -Logger created using one of the LoggerFrom* funcs can be used directly by calling one of the main log funcs. -Example: - import log "github.com/cihub/seelog" - - func main() { - logger, err := log.LoggerFromConfigAsFile("seelog.xml") - if err != nil { - panic(err) - } - defer logger.Flush() - logger.Trace("test") - logger.Debugf("var = %s", "abc") - } - -Having loggers as variables is convenient if you are writing your own package with internal logging or if you have -several loggers with different options. -But for most standalone apps it is more convenient to use package level funcs and vars. There is a package level -var 'Current' made for it. You can replace it with another logger using 'ReplaceLogger' and then use package level funcs: - import log "github.com/cihub/seelog" - - func main() { - logger, err := log.LoggerFromConfigAsFile("seelog.xml") - if err != nil { - panic(err) - } - log.ReplaceLogger(logger) - defer log.Flush() - log.Trace("test") - log.Debugf("var = %s", "abc") - } -Last lines - log.Trace("test") - log.Debugf("var = %s", "abc") -do the same as - log.Current.Trace("test") - log.Current.Debugf("var = %s", "abc") -In this example the 'Current' logger was replaced using a 'ReplaceLogger' call and became equal to 'logger' variable created from config. -This way you are able to use package level funcs instead of passing the logger variable. - -Configuration - -Main seelog point is to configure logger via config files and not the code. -The configuration is read by LoggerFrom* funcs. These funcs read xml configuration from different sources and try -to create a logger using it. 
- -All the configuration features are covered in detail in the official wiki: https://github.com/cihub/seelog/wiki. -There are many sections covering different aspects of seelog, but the most important for understanding configs are: - https://github.com/cihub/seelog/wiki/Constraints-and-exceptions - https://github.com/cihub/seelog/wiki/Dispatchers-and-receivers - https://github.com/cihub/seelog/wiki/Formatting - https://github.com/cihub/seelog/wiki/Logger-types -After you understand these concepts, check the 'Reference' section on the main wiki page to get the up-to-date -list of dispatchers, receivers, formats, and logger types. - -Here is an example config with all these features: - - - - - - - - - - - - - - - - - - - - - -This config represents a logger with adaptive timeout between log messages (check logger types reference) which -logs to console, all.log, and errors.log depending on the log level. Its output formats also depend on log level. This logger will only -use log level 'debug' and higher (minlevel is set) for all files with names that don't start with 'test'. For files starting with 'test' -this logger prohibits all levels below 'error'. - -Configuration using code - -Although configuration using code is not recommended, it is sometimes needed and it is possible to do with seelog. Basically, what -you need to do to get started is to create constraints, exceptions and a dispatcher tree (same as with config). Most of the New* -functions in this package are used to provide such capabilities. - -Here is an example of configuration in code, that demonstrates an async loop logger that logs to a simple split dispatcher with -a console receiver using a specified format and is filtered using a top-level min-max constraints and one expection for -the 'main.go' file. 
So, this is basically a demonstration of configuration of most of the features: - - package main - - import log "github.com/cihub/seelog" - - func main() { - defer log.Flush() - log.Info("Hello from Seelog!") - - consoleWriter, _ := log.NewConsoleWriter() - formatter, _ := log.NewFormatter("%Level %Msg %File%n") - root, _ := log.NewSplitDispatcher(formatter, []interface{}{consoleWriter}) - constraints, _ := log.NewMinMaxConstraints(log.TraceLvl, log.CriticalLvl) - specificConstraints, _ := log.NewListConstraints([]log.LogLevel{log.InfoLvl, log.ErrorLvl}) - ex, _ := log.NewLogLevelException("*", "*main.go", specificConstraints) - exceptions := []*log.LogLevelException{ex} - - logger := log.NewAsyncLoopLogger(log.NewLoggerConfig(constraints, exceptions, root)) - log.ReplaceLogger(logger) - - log.Trace("This should not be seen") - log.Debug("This should not be seen") - log.Info("Test") - log.Error("Test2") - } - -Examples - -To learn seelog features faster you should check the examples package: https://github.com/cihub/seelog-examples -It contains many example configs and usecases. -*/ -package seelog diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/format.go b/Godeps/_workspace/src/github.com/cihub/seelog/format.go deleted file mode 100644 index 32682f34..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/format.go +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -// FormatterSymbol is a special symbol used in config files to mark special format aliases. -const ( - FormatterSymbol = '%' -) - -const ( - formatterParameterStart = '(' - formatterParameterEnd = ')' -) - -// Time and date formats used for %Date and %Time aliases. -const ( - DateDefaultFormat = "2006-01-02" - TimeFormat = "15:04:05" -) - -var DefaultMsgFormat = "%Ns [%Level] %Msg%n" - -var ( - DefaultFormatter *formatter - msgonlyformatter *formatter -) - -func init() { - var err error - if DefaultFormatter, err = NewFormatter(DefaultMsgFormat); err != nil { - reportInternalError(fmt.Errorf("error during creating DefaultFormatter: %s", err)) - } - if msgonlyformatter, err = NewFormatter("%Msg"); err != nil { - reportInternalError(fmt.Errorf("error during creating msgonlyformatter: %s", err)) - } -} - -// FormatterFunc represents one formatter object that starts with '%' sign in the 'format' attribute -// of the 'format' config item. These special symbols are replaced with context values or special -// strings when message is written to byte receiver. 
-// -// Check https://github.com/cihub/seelog/wiki/Formatting for details. -// Full list (with descriptions) of formatters: https://github.com/cihub/seelog/wiki/Format-reference -// -// FormatterFunc takes raw log message, level, log context and returns a string, number (of any type) or any object -// that can be evaluated as string. -type FormatterFunc func(message string, level LogLevel, context LogContextInterface) interface{} - -// FormatterFuncCreator is a factory of FormatterFunc objects. It is used to generate parameterized -// formatters (such as %Date or %EscM) and custom user formatters. -type FormatterFuncCreator func(param string) FormatterFunc - -var formatterFuncs = map[string]FormatterFunc{ - "Level": formatterLevel, - "Lev": formatterLev, - "LEVEL": formatterLEVEL, - "LEV": formatterLEV, - "l": formatterl, - "Msg": formatterMsg, - "FullPath": formatterFullPath, - "File": formatterFile, - "RelFile": formatterRelFile, - "Func": FormatterFunction, - "FuncShort": FormatterFunctionShort, - "Line": formatterLine, - "Time": formatterTime, - "UTCTime": formatterUTCTime, - "Ns": formatterNs, - "UTCNs": formatterUTCNs, - "n": formattern, - "t": formattert, -} - -var formatterFuncsParameterized = map[string]FormatterFuncCreator{ - "Date": createDateTimeFormatterFunc, - "UTCDate": createUTCDateTimeFormatterFunc, - "EscM": createANSIEscapeFunc, -} - -func errorAliasReserved(name string) error { - return fmt.Errorf("cannot use '%s' as custom formatter name. Name is reserved", name) -} - -// RegisterCustomFormatter registers a new custom formatter factory with a given name. If returned error is nil, -// then this name (prepended by '%' symbol) can be used in 'format' attributes in configuration and -// it will be treated like the standard parameterized formatter identifiers. -// -// RegisterCustomFormatter needs to be called before creating a logger for it to take effect. 
The general recommendation -// is to call it once in 'init' func of your application or any initializer func. -// -// For usage examples, check https://github.com/cihub/seelog/wiki/Custom-formatters. -// -// Name must only consist of letters (unicode.IsLetter). -// -// Name must not be one of the already registered standard formatter names -// (https://github.com/cihub/seelog/wiki/Format-reference) and previously registered -// custom format names. To avoid any potential name conflicts (in future releases), it is recommended -// to start your custom formatter name with a namespace (e.g. 'MyCompanySomething') or a 'Custom' keyword. -func RegisterCustomFormatter(name string, creator FormatterFuncCreator) error { - if _, ok := formatterFuncs[name]; ok { - return errorAliasReserved(name) - } - if _, ok := formatterFuncsParameterized[name]; ok { - return errorAliasReserved(name) - } - formatterFuncsParameterized[name] = creator - return nil -} - -// formatter is used to write messages in a specific format, inserting such additional data -// as log level, date/time, etc. -type formatter struct { - fmtStringOriginal string - fmtString string - formatterFuncs []FormatterFunc -} - -// NewFormatter creates a new formatter using a format string -func NewFormatter(formatString string) (*formatter, error) { - fmtr := new(formatter) - fmtr.fmtStringOriginal = formatString - if err := buildFormatterFuncs(fmtr); err != nil { - return nil, err - } - return fmtr, nil -} - -func buildFormatterFuncs(formatter *formatter) error { - var ( - fsbuf = new(bytes.Buffer) - fsolm1 = len(formatter.fmtStringOriginal) - 1 - ) - for i := 0; i <= fsolm1; i++ { - if char := formatter.fmtStringOriginal[i]; char != FormatterSymbol { - fsbuf.WriteByte(char) - continue - } - // Check if the index is at the end of the string. - if i == fsolm1 { - return fmt.Errorf("format error: %c cannot be last symbol", FormatterSymbol) - } - // Check if the formatter symbol is doubled and skip it as nonmatching. 
- if formatter.fmtStringOriginal[i+1] == FormatterSymbol { - fsbuf.WriteRune(FormatterSymbol) - i++ - continue - } - function, ni, err := formatter.extractFormatterFunc(i + 1) - if err != nil { - return err - } - // Append formatting string "%v". - fsbuf.Write([]byte{37, 118}) - i = ni - formatter.formatterFuncs = append(formatter.formatterFuncs, function) - } - formatter.fmtString = fsbuf.String() - return nil -} - -func (formatter *formatter) extractFormatterFunc(index int) (FormatterFunc, int, error) { - letterSequence := formatter.extractLetterSequence(index) - if len(letterSequence) == 0 { - return nil, 0, fmt.Errorf("format error: lack of formatter after %c at %d", FormatterSymbol, index) - } - - function, formatterLength, ok := formatter.findFormatterFunc(letterSequence) - if ok { - return function, index + formatterLength - 1, nil - } - - function, formatterLength, ok, err := formatter.findFormatterFuncParametrized(letterSequence, index) - if err != nil { - return nil, 0, err - } - if ok { - return function, index + formatterLength - 1, nil - } - - return nil, 0, errors.New("format error: unrecognized formatter at " + strconv.Itoa(index) + ": " + letterSequence) -} - -func (formatter *formatter) extractLetterSequence(index int) string { - letters := "" - - bytesToParse := []byte(formatter.fmtStringOriginal[index:]) - runeCount := utf8.RuneCount(bytesToParse) - for i := 0; i < runeCount; i++ { - rune, runeSize := utf8.DecodeRune(bytesToParse) - bytesToParse = bytesToParse[runeSize:] - - if unicode.IsLetter(rune) { - letters += string(rune) - } else { - break - } - } - return letters -} - -func (formatter *formatter) findFormatterFunc(letters string) (FormatterFunc, int, bool) { - currentVerb := letters - for i := 0; i < len(letters); i++ { - function, ok := formatterFuncs[currentVerb] - if ok { - return function, len(currentVerb), ok - } - currentVerb = currentVerb[:len(currentVerb)-1] - } - - return nil, 0, false -} - -func (formatter *formatter) 
findFormatterFuncParametrized(letters string, lettersStartIndex int) (FormatterFunc, int, bool, error) { - currentVerb := letters - for i := 0; i < len(letters); i++ { - functionCreator, ok := formatterFuncsParameterized[currentVerb] - if ok { - parameter := "" - parameterLen := 0 - isVerbEqualsLetters := i == 0 // if not, then letter goes after formatter, and formatter is parameterless - if isVerbEqualsLetters { - userParameter := "" - var err error - userParameter, parameterLen, ok, err = formatter.findparameter(lettersStartIndex + len(currentVerb)) - if ok { - parameter = userParameter - } else if err != nil { - return nil, 0, false, err - } - } - - return functionCreator(parameter), len(currentVerb) + parameterLen, true, nil - } - - currentVerb = currentVerb[:len(currentVerb)-1] - } - - return nil, 0, false, nil -} - -func (formatter *formatter) findparameter(startIndex int) (string, int, bool, error) { - if len(formatter.fmtStringOriginal) == startIndex || formatter.fmtStringOriginal[startIndex] != formatterParameterStart { - return "", 0, false, nil - } - - endIndex := strings.Index(formatter.fmtStringOriginal[startIndex:], string(formatterParameterEnd)) - if endIndex == -1 { - return "", 0, false, fmt.Errorf("Unmatched parenthesis or invalid parameter at %d: %s", - startIndex, formatter.fmtStringOriginal[startIndex:]) - } - endIndex += startIndex - - length := endIndex - startIndex + 1 - - return formatter.fmtStringOriginal[startIndex+1 : endIndex], length, true, nil -} - -// Format processes a message with special formatters, log level, and context. Returns formatted string -// with all formatter identifiers changed to appropriate values. 
-func (formatter *formatter) Format(message string, level LogLevel, context LogContextInterface) string { - if len(formatter.formatterFuncs) == 0 { - return formatter.fmtString - } - - params := make([]interface{}, len(formatter.formatterFuncs)) - for i, function := range formatter.formatterFuncs { - params[i] = function(message, level, context) - } - - return fmt.Sprintf(formatter.fmtString, params...) -} - -func (formatter *formatter) String() string { - return formatter.fmtStringOriginal -} - -//===================================================== - -const ( - wrongLogLevel = "WRONG_LOGLEVEL" - wrongEscapeCode = "WRONG_ESCAPE" -) - -var levelToString = map[LogLevel]string{ - TraceLvl: "Trace", - DebugLvl: "Debug", - InfoLvl: "Info", - WarnLvl: "Warn", - ErrorLvl: "Error", - CriticalLvl: "Critical", - Off: "Off", -} - -var levelToShortString = map[LogLevel]string{ - TraceLvl: "Trc", - DebugLvl: "Dbg", - InfoLvl: "Inf", - WarnLvl: "Wrn", - ErrorLvl: "Err", - CriticalLvl: "Crt", - Off: "Off", -} - -var levelToShortestString = map[LogLevel]string{ - TraceLvl: "t", - DebugLvl: "d", - InfoLvl: "i", - WarnLvl: "w", - ErrorLvl: "e", - CriticalLvl: "c", - Off: "o", -} - -func formatterLevel(message string, level LogLevel, context LogContextInterface) interface{} { - levelStr, ok := levelToString[level] - if !ok { - return wrongLogLevel - } - return levelStr -} - -func formatterLev(message string, level LogLevel, context LogContextInterface) interface{} { - levelStr, ok := levelToShortString[level] - if !ok { - return wrongLogLevel - } - return levelStr -} - -func formatterLEVEL(message string, level LogLevel, context LogContextInterface) interface{} { - return strings.ToTitle(formatterLevel(message, level, context).(string)) -} - -func formatterLEV(message string, level LogLevel, context LogContextInterface) interface{} { - return strings.ToTitle(formatterLev(message, level, context).(string)) -} - -func formatterl(message string, level LogLevel, context 
LogContextInterface) interface{} { - levelStr, ok := levelToShortestString[level] - if !ok { - return wrongLogLevel - } - return levelStr -} - -func formatterMsg(message string, level LogLevel, context LogContextInterface) interface{} { - return message -} - -func formatterFullPath(message string, level LogLevel, context LogContextInterface) interface{} { - return context.FullPath() -} - -func formatterFile(message string, level LogLevel, context LogContextInterface) interface{} { - return context.FileName() -} - -func formatterRelFile(message string, level LogLevel, context LogContextInterface) interface{} { - return context.ShortPath() -} - -func FormatterFunction(message string, level LogLevel, context LogContextInterface) interface{} { - return context.Func() -} - -func FormatterFunctionShort(message string, level LogLevel, context LogContextInterface) interface{} { - f := context.Func() - spl := strings.Split(f, ".") - return spl[len(spl)-1] -} - -func formatterLine(message string, level LogLevel, context LogContextInterface) interface{} { - return context.Line() -} - -func formatterTime(message string, level LogLevel, context LogContextInterface) interface{} { - return context.CallTime().Format(TimeFormat) -} - -func formatterUTCTime(message string, level LogLevel, context LogContextInterface) interface{} { - return context.CallTime().UTC().Format(TimeFormat) -} - -func formatterNs(message string, level LogLevel, context LogContextInterface) interface{} { - return context.CallTime().UnixNano() -} - -func formatterUTCNs(message string, level LogLevel, context LogContextInterface) interface{} { - return context.CallTime().UTC().UnixNano() -} - -func formattern(message string, level LogLevel, context LogContextInterface) interface{} { - return "\n" -} - -func formattert(message string, level LogLevel, context LogContextInterface) interface{} { - return "\t" -} - -func createDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc { - format := dateTimeFormat - 
if format == "" { - format = DateDefaultFormat - } - return func(message string, level LogLevel, context LogContextInterface) interface{} { - return context.CallTime().Format(format) - } -} - -func createUTCDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc { - format := dateTimeFormat - if format == "" { - format = DateDefaultFormat - } - return func(message string, level LogLevel, context LogContextInterface) interface{} { - return context.CallTime().UTC().Format(format) - } -} - -func createANSIEscapeFunc(escapeCodeString string) FormatterFunc { - return func(message string, level LogLevel, context LogContextInterface) interface{} { - if len(escapeCodeString) == 0 { - return wrongEscapeCode - } - - return fmt.Sprintf("%c[%sm", 0x1B, escapeCodeString) - } -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/format_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/format_test.go deleted file mode 100644 index dd61bdfd..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/format_test.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "fmt" - "strings" - "testing" - "time" -) - -const ( - TestFuncName = "TestFormats" -) - -type formatTest struct { - formatString string - input string - inputLogLevel LogLevel - expectedOutput string - errorExpected bool -} - -var formatTests = []formatTest{ - {"test", "abcdef", TraceLvl, "test", false}, - {"", "abcdef", TraceLvl, "", false}, - {"%Level", "", TraceLvl, "Trace", false}, - {"%Level", "", DebugLvl, "Debug", false}, - {"%Level", "", InfoLvl, "Info", false}, - {"%Level", "", WarnLvl, "Warn", false}, - {"%Level", "", ErrorLvl, "Error", false}, - {"%Level", "", CriticalLvl, "Critical", false}, - {"[%Level]", "", TraceLvl, "[Trace]", false}, - {"[%Level]", "abc", DebugLvl, "[Debug]", false}, - {"%LevelLevel", "", InfoLvl, "InfoLevel", false}, - {"[%Level][%Level]", "", WarnLvl, "[Warn][Warn]", false}, - {"[%Level]X[%Level]", "", ErrorLvl, "[Error]X[Error]", false}, - {"%Levelll", "", CriticalLvl, "Criticalll", false}, - {"%Lvl", "", TraceLvl, "", true}, - {"%%Level", "", DebugLvl, "%Level", false}, - {"%Level%", "", InfoLvl, "", true}, - {"%sevel", "", WarnLvl, "", true}, - {"Level", "", ErrorLvl, "Level", false}, - {"%LevelLevel", "", CriticalLvl, "CriticalLevel", false}, - {"%Lev", "", TraceLvl, "Trc", false}, - {"%Lev", "", DebugLvl, "Dbg", false}, - {"%Lev", "", InfoLvl, "Inf", false}, - {"%Lev", "", WarnLvl, "Wrn", false}, - {"%Lev", "", ErrorLvl, "Err", false}, - {"%Lev", "", CriticalLvl, 
"Crt", false}, - {"[%Lev]", "", TraceLvl, "[Trc]", false}, - {"[%Lev]", "abc", DebugLvl, "[Dbg]", false}, - {"%LevLevel", "", InfoLvl, "InfLevel", false}, - {"[%Level][%Lev]", "", WarnLvl, "[Warn][Wrn]", false}, - {"[%Lev]X[%Lev]", "", ErrorLvl, "[Err]X[Err]", false}, - {"%Levll", "", CriticalLvl, "Crtll", false}, - {"%LEVEL", "", TraceLvl, "TRACE", false}, - {"%LEVEL", "", DebugLvl, "DEBUG", false}, - {"%LEVEL", "", InfoLvl, "INFO", false}, - {"%LEVEL", "", WarnLvl, "WARN", false}, - {"%LEVEL", "", ErrorLvl, "ERROR", false}, - {"%LEVEL", "", CriticalLvl, "CRITICAL", false}, - {"[%LEVEL]", "", TraceLvl, "[TRACE]", false}, - {"[%LEVEL]", "abc", DebugLvl, "[DEBUG]", false}, - {"%LEVELLEVEL", "", InfoLvl, "INFOLEVEL", false}, - {"[%LEVEL][%LEVEL]", "", WarnLvl, "[WARN][WARN]", false}, - {"[%LEVEL]X[%Level]", "", ErrorLvl, "[ERROR]X[Error]", false}, - {"%LEVELLL", "", CriticalLvl, "CRITICALLL", false}, - {"%LEV", "", TraceLvl, "TRC", false}, - {"%LEV", "", DebugLvl, "DBG", false}, - {"%LEV", "", InfoLvl, "INF", false}, - {"%LEV", "", WarnLvl, "WRN", false}, - {"%LEV", "", ErrorLvl, "ERR", false}, - {"%LEV", "", CriticalLvl, "CRT", false}, - {"[%LEV]", "", TraceLvl, "[TRC]", false}, - {"[%LEV]", "abc", DebugLvl, "[DBG]", false}, - {"%LEVLEVEL", "", InfoLvl, "INFLEVEL", false}, - {"[%LEVEL][%LEV]", "", WarnLvl, "[WARN][WRN]", false}, - {"[%LEV]X[%LEV]", "", ErrorLvl, "[ERR]X[ERR]", false}, - {"%LEVLL", "", CriticalLvl, "CRTLL", false}, - {"%l", "", TraceLvl, "t", false}, - {"%l", "", DebugLvl, "d", false}, - {"%l", "", InfoLvl, "i", false}, - {"%l", "", WarnLvl, "w", false}, - {"%l", "", ErrorLvl, "e", false}, - {"%l", "", CriticalLvl, "c", false}, - {"[%l]", "", TraceLvl, "[t]", false}, - {"[%l]", "abc", DebugLvl, "[d]", false}, - {"%Level%Msg", "", TraceLvl, "Trace", false}, - {"%Level%Msg", "A", DebugLvl, "DebugA", false}, - {"%Level%Msg", "", InfoLvl, "Info", false}, - {"%Level%Msg", "test", WarnLvl, "Warntest", false}, - {"%Level%Msg", " ", ErrorLvl, "Error ", 
false}, - {"%Level%Msg", "", CriticalLvl, "Critical", false}, - {"[%Level]", "", TraceLvl, "[Trace]", false}, - {"[%Level]", "abc", DebugLvl, "[Debug]", false}, - {"%Level%MsgLevel", "A", InfoLvl, "InfoALevel", false}, - {"[%Level]%Msg[%Level]", "test", WarnLvl, "[Warn]test[Warn]", false}, - {"[%Level]%MsgX[%Level]", "test", ErrorLvl, "[Error]testX[Error]", false}, - {"%Levell%Msgl", "Test", CriticalLvl, "CriticallTestl", false}, - {"%Lev%Msg%LEVEL%LEV%l%Msg", "Test", InfoLvl, "InfTestINFOINFiTest", false}, - {"%n", "", CriticalLvl, "\n", false}, - {"%t", "", CriticalLvl, "\t", false}, -} - -func TestFormats(t *testing.T) { - - context, conErr := currentContext(nil) - if conErr != nil { - t.Fatal("Cannot get current context:" + conErr.Error()) - return - } - - for _, test := range formatTests { - - form, err := NewFormatter(test.formatString) - - if (err != nil) != test.errorExpected { - t.Errorf("input: %s \nInput LL: %s\n* Expected error:%t Got error: %t\n", - test.input, test.inputLogLevel, test.errorExpected, (err != nil)) - if err != nil { - t.Logf("%s\n", err.Error()) - } - continue - } else if err != nil { - continue - } - - msg := form.Format(test.input, test.inputLogLevel, context) - - if err == nil && msg != test.expectedOutput { - t.Errorf("format: %s \nInput: %s \nInput LL: %s\n* Expected: %s \n* Got: %s\n", - test.formatString, test.input, test.inputLogLevel, test.expectedOutput, msg) - } - } -} - -func TestDateFormat(t *testing.T) { - _, err := NewFormatter("%Date") - if err != nil { - t.Error("Unexpected error: " + err.Error()) - } -} - -func TestDateParameterizedFormat(t *testing.T) { - testFormat := "Mon Jan 02 2006 15:04:05" - preciseForamt := "Mon Jan 02 2006 15:04:05.000" - - context, conErr := currentContext(nil) - if conErr != nil { - t.Fatal("Cannot get current context:" + conErr.Error()) - return - } - - form, err := NewFormatter("%Date(" + preciseForamt + ")") - if err != nil { - t.Error("Unexpected error: " + err.Error()) - } - - 
dateBefore := time.Now().Format(testFormat) - msg := form.Format("", TraceLvl, context) - dateAfter := time.Now().Format(testFormat) - - if !strings.HasPrefix(msg, dateBefore) && !strings.HasPrefix(msg, dateAfter) { - t.Errorf("incorrect message: %v. Expected %v or %v", msg, dateBefore, dateAfter) - } - - _, err = NewFormatter("%Date(" + preciseForamt) - if err == nil { - t.Error("Expected error for invalid format") - } -} - -func createTestFormatter(format string) FormatterFunc { - return func(message string, level LogLevel, context LogContextInterface) interface{} { - return "TEST " + context.Func() + " TEST" - } -} - -func TestCustomFormatterRegistration(t *testing.T) { - err := RegisterCustomFormatter("Level", createTestFormatter) - if err == nil { - t.Errorf("expected an error when trying to register a custom formatter with a reserved alias") - } - err = RegisterCustomFormatter("EscM", createTestFormatter) - if err == nil { - t.Errorf("expected an error when trying to register a custom formatter with a reserved parameterized alias") - } - err = RegisterCustomFormatter("TEST", createTestFormatter) - if err != nil { - t.Fatalf("Registering custom formatter: unexpected error: %s", err) - } - err = RegisterCustomFormatter("TEST", createTestFormatter) - if err == nil { - t.Errorf("expected an error when trying to register a custom formatter with duplicate name") - } - - context, conErr := currentContext(nil) - if conErr != nil { - t.Fatal("Cannot get current context:" + conErr.Error()) - return - } - - form, err := NewFormatter("%Msg %TEST 123") - if err != nil { - t.Fatalf("%s\n", err.Error()) - } - - expected := fmt.Sprintf("test TEST %sTestCustomFormatterRegistration TEST 123", commonPrefix) - msg := form.Format("test", DebugLvl, context) - if msg != expected { - t.Fatalf("Custom formatter: invalid output. Expected: '%s'. 
Got: '%s'", expected, msg) - } -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/internals_baseerror.go b/Godeps/_workspace/src/github.com/cihub/seelog/internals_baseerror.go deleted file mode 100644 index c0b271d7..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/internals_baseerror.go +++ /dev/null @@ -1,10 +0,0 @@ -package seelog - -// Base struct for custom errors. -type baseError struct { - message string -} - -func (be baseError) Error() string { - return be.message -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/internals_byteverifiers_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/internals_byteverifiers_test.go deleted file mode 100644 index 0ab6ebc6..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/internals_byteverifiers_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "strconv" - "testing" -) - -// bytesVerifier is a byte receiver which is used for correct input testing. -// It allows to compare expected result and actual result in context of received bytes. -type bytesVerifier struct { - expectedBytes []byte // bytes that are expected to be written in next Write call - waitingForInput bool // true if verifier is waiting for a Write call - writtenData []byte // real bytes that actually were received during the last Write call - testEnv *testing.T -} - -func newBytesVerifier(t *testing.T) (*bytesVerifier, error) { - if t == nil { - return nil, errors.New("testing environment param is nil") - } - - verifier := new(bytesVerifier) - verifier.testEnv = t - - return verifier, nil -} - -// Write is used to check whether verifier was waiting for input and whether bytes are the same as expectedBytes. -// After Write call, waitingForInput is set to false. -func (verifier *bytesVerifier) Write(bytes []byte) (n int, err error) { - if !verifier.waitingForInput { - verifier.testEnv.Errorf("unexpected input: %v", string(bytes)) - return - } - - verifier.waitingForInput = false - verifier.writtenData = bytes - - if verifier.expectedBytes != nil { - if bytes == nil { - verifier.testEnv.Errorf("incoming 'bytes' is nil") - } else { - if len(bytes) != len(verifier.expectedBytes) { - verifier.testEnv.Errorf("'Bytes' has unexpected len. Expected: %d. Got: %d. . 
Expected string: %q. Got: %q", - len(verifier.expectedBytes), len(bytes), string(verifier.expectedBytes), string(bytes)) - } else { - for i := 0; i < len(bytes); i++ { - if verifier.expectedBytes[i] != bytes[i] { - verifier.testEnv.Errorf("incorrect data on position %d. Expected: %d. Got: %d. Expected string: %q. Got: %q", - i, verifier.expectedBytes[i], bytes[i], string(verifier.expectedBytes), string(bytes)) - break - } - } - } - } - } - - return len(bytes), nil -} - -func (verifier *bytesVerifier) ExpectBytes(bytes []byte) { - verifier.waitingForInput = true - verifier.expectedBytes = bytes -} - -func (verifier *bytesVerifier) MustNotExpect() { - if verifier.waitingForInput { - errorText := "Unexpected input: " - - if verifier.expectedBytes != nil { - errorText += "len = " + strconv.Itoa(len(verifier.expectedBytes)) - errorText += ". text = " + string(verifier.expectedBytes) - } - - verifier.testEnv.Errorf(errorText) - } -} - -func (verifier *bytesVerifier) Close() error { - return nil -} - -// nullWriter implements io.Writer inteface and does nothing, always returning a successful write result -type nullWriter struct { -} - -func (writer *nullWriter) Write(bytes []byte) (n int, err error) { - return len(bytes), nil -} - -func (writer *nullWriter) Close() error { - return nil -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/internals_fsutils.go b/Godeps/_workspace/src/github.com/cihub/seelog/internals_fsutils.go deleted file mode 100644 index 5baa6ba6..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/internals_fsutils.go +++ /dev/null @@ -1,403 +0,0 @@ -package seelog - -import ( - "archive/zip" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sync" -) - -// File and directory permitions. -const ( - defaultFilePermissions = 0666 - defaultDirectoryPermissions = 0767 -) - -const ( - // Max number of directories can be read asynchronously. 
- maxDirNumberReadAsync = 1000 -) - -type cannotOpenFileError struct { - baseError -} - -func newCannotOpenFileError(fname string) *cannotOpenFileError { - return &cannotOpenFileError{baseError{message: "Cannot open file: " + fname}} -} - -type notDirectoryError struct { - baseError -} - -func newNotDirectoryError(dname string) *notDirectoryError { - return ¬DirectoryError{baseError{message: dname + " is not directory"}} -} - -// fileFilter is a filtering criteria function for '*os.File'. -// Must return 'false' to set aside the given file. -type fileFilter func(os.FileInfo, *os.File) bool - -// filePathFilter is a filtering creteria function for file path. -// Must return 'false' to set aside the given file. -type filePathFilter func(filePath string) bool - -// GetSubdirNames returns a list of directories found in -// the given one with dirPath. -func getSubdirNames(dirPath string) ([]string, error) { - fi, err := os.Stat(dirPath) - if err != nil { - return nil, err - } - if !fi.IsDir() { - return nil, newNotDirectoryError(dirPath) - } - dd, err := os.Open(dirPath) - // Cannot open file. - if err != nil { - if dd != nil { - dd.Close() - } - return nil, err - } - defer dd.Close() - // TODO: Improve performance by buffering reading. - allEntities, err := dd.Readdir(-1) - if err != nil { - return nil, err - } - subDirs := []string{} - for _, entity := range allEntities { - if entity.IsDir() { - subDirs = append(subDirs, entity.Name()) - } - } - return subDirs, nil -} - -// getSubdirAbsPaths recursively visit all the subdirectories -// starting from the given directory and returns absolute paths for them. -func getAllSubdirAbsPaths(dirPath string) (res []string, err error) { - dps, err := getSubdirAbsPaths(dirPath) - if err != nil { - res = []string{} - return - } - res = append(res, dps...) - for _, dp := range dps { - sdps, err := getAllSubdirAbsPaths(dp) - if err != nil { - return []string{}, err - } - res = append(res, sdps...) 
- } - return -} - -// getSubdirAbsPaths supplies absolute paths for all subdirectiries in a given directory. -// Input: (I1) dirPath - absolute path of a directory in question. -// Out: (O1) - slice of subdir asbolute paths; (O2) - error of the operation. -// Remark: If error (O2) is non-nil then (O1) is nil and vice versa. -func getSubdirAbsPaths(dirPath string) ([]string, error) { - sdns, err := getSubdirNames(dirPath) - if err != nil { - return nil, err - } - rsdns := []string{} - for _, sdn := range sdns { - rsdns = append(rsdns, filepath.Join(dirPath, sdn)) - } - return rsdns, nil -} - -// getOpenFilesInDir supplies a slice of os.File pointers to files located in the directory. -// Remark: Ignores files for which fileFilter returns false -func getOpenFilesInDir(dirPath string, fFilter fileFilter) ([]*os.File, error) { - dfi, err := os.Open(dirPath) - if err != nil { - return nil, newCannotOpenFileError("Cannot open directory " + dirPath) - } - defer dfi.Close() - // Size of read buffer (i.e. chunk of items read at a time). - rbs := 64 - resFiles := []*os.File{} -L: - for { - // Read directory entities by reasonable chuncks - // to prevent overflows on big number of files. - fis, e := dfi.Readdir(rbs) - switch e { - // It's OK. - case nil: - // Do nothing, just continue cycle. - case io.EOF: - break L - // Something went wrong. - default: - return nil, e - } - // THINK: Maybe, use async running. - for _, fi := range fis { - // NB: On Linux this could be a problem as - // there are lots of file types available. - if !fi.IsDir() { - f, e := os.Open(filepath.Join(dirPath, fi.Name())) - if e != nil { - if f != nil { - f.Close() - } - // THINK: Add nil as indicator that a problem occurred. - resFiles = append(resFiles, nil) - continue - } - // Check filter condition. 
- if fFilter != nil && !fFilter(fi, f) { - continue - } - resFiles = append(resFiles, f) - } - } - } - return resFiles, nil -} - -func isRegular(m os.FileMode) bool { - return m&os.ModeType == 0 -} - -// getDirFilePaths return full paths of the files located in the directory. -// Remark: Ignores files for which fileFilter returns false. -func getDirFilePaths(dirPath string, fpFilter filePathFilter, pathIsName bool) ([]string, error) { - dfi, err := os.Open(dirPath) - if err != nil { - return nil, newCannotOpenFileError("Cannot open directory " + dirPath) - } - defer dfi.Close() - - var absDirPath string - if !filepath.IsAbs(dirPath) { - absDirPath, err = filepath.Abs(dirPath) - if err != nil { - return nil, fmt.Errorf("cannot get absolute path of directory: %s", err.Error()) - } - } else { - absDirPath = dirPath - } - - // TODO: check if dirPath is really directory. - // Size of read buffer (i.e. chunk of items read at a time). - rbs := 2 << 5 - filePaths := []string{} - - var fp string -L: - for { - // Read directory entities by reasonable chuncks - // to prevent overflows on big number of files. - fis, e := dfi.Readdir(rbs) - switch e { - // It's OK. - case nil: - // Do nothing, just continue cycle. - case io.EOF: - break L - // Indicate that something went wrong. - default: - return nil, e - } - // THINK: Maybe, use async running. - for _, fi := range fis { - // NB: Should work on every Windows and non-Windows OS. - if isRegular(fi.Mode()) { - if pathIsName { - fp = fi.Name() - } else { - // Build full path of a file. - fp = filepath.Join(absDirPath, fi.Name()) - } - // Check filter condition. - if fpFilter != nil && !fpFilter(fp) { - continue - } - filePaths = append(filePaths, fp) - } - } - } - return filePaths, nil -} - -// getOpenFilesByDirectoryAsync runs async reading directories 'dirPaths' and inserts pairs -// in map 'filesInDirMap': Key - directory name, value - *os.File slice. 
-func getOpenFilesByDirectoryAsync( - dirPaths []string, - fFilter fileFilter, - filesInDirMap map[string][]*os.File, -) error { - n := len(dirPaths) - if n > maxDirNumberReadAsync { - return fmt.Errorf("number of input directories to be read exceeded max value %d", maxDirNumberReadAsync) - } - type filesInDirResult struct { - DirName string - Files []*os.File - Error error - } - dirFilesChan := make(chan *filesInDirResult, n) - var wg sync.WaitGroup - // Register n goroutines which are going to do work. - wg.Add(n) - for i := 0; i < n; i++ { - // Launch asynchronously the piece of work. - go func(dirPath string) { - fs, e := getOpenFilesInDir(dirPath, fFilter) - dirFilesChan <- &filesInDirResult{filepath.Base(dirPath), fs, e} - // Mark the current goroutine as finished (work is done). - wg.Done() - }(dirPaths[i]) - } - // Wait for all goroutines to finish their work. - wg.Wait() - // Close the error channel to let for-range clause - // get all the buffered values without blocking and quit in the end. - close(dirFilesChan) - for fidr := range dirFilesChan { - if fidr.Error == nil { - // THINK: What will happen if the key is already present? - filesInDirMap[fidr.DirName] = fidr.Files - } else { - return fidr.Error - } - } - return nil -} - -func copyFile(sf *os.File, dst string) (int64, error) { - df, err := os.Create(dst) - if err != nil { - return 0, err - } - defer df.Close() - return io.Copy(df, sf) -} - -// fileExists return flag whether a given file exists -// and operation error if an unclassified failure occurs. -func fileExists(path string) (bool, error) { - _, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - return true, nil -} - -// createDirectory makes directory with a given name -// making all parent directories if necessary. 
-func createDirectory(dirPath string) error { - var dPath string - var err error - if !filepath.IsAbs(dirPath) { - dPath, err = filepath.Abs(dirPath) - if err != nil { - return err - } - } else { - dPath = dirPath - } - exists, err := fileExists(dPath) - if err != nil { - return err - } - if exists { - return nil - } - return os.MkdirAll(dPath, os.ModeDir) -} - -// tryRemoveFile gives a try removing the file -// only ignoring an error when the file does not exist. -func tryRemoveFile(filePath string) (err error) { - err = os.Remove(filePath) - if os.IsNotExist(err) { - err = nil - return - } - return -} - -// Unzips a specified zip file. Returns filename->filebytes map. -func unzip(archiveName string) (map[string][]byte, error) { - // Open a zip archive for reading. - r, err := zip.OpenReader(archiveName) - if err != nil { - return nil, err - } - defer r.Close() - - // Files to be added to archive - // map file name to contents - files := make(map[string][]byte) - - // Iterate through the files in the archive, - // printing some of their contents. - for _, f := range r.File { - rc, err := f.Open() - if err != nil { - return nil, err - } - - bts, err := ioutil.ReadAll(rc) - rcErr := rc.Close() - - if err != nil { - return nil, err - } - if rcErr != nil { - return nil, rcErr - } - - files[f.Name] = bts - } - - return files, nil -} - -// Creates a zip file with the specified file names and byte contents. -func createZip(archiveName string, files map[string][]byte) error { - // Create a buffer to write our archive to. - buf := new(bytes.Buffer) - - // Create a new zip archive. - w := zip.NewWriter(buf) - - // Write files - for fpath, fcont := range files { - f, err := w.Create(fpath) - if err != nil { - return err - } - _, err = f.Write([]byte(fcont)) - if err != nil { - return err - } - } - - // Make sure to check the error on Close. 
- err := w.Close() - if err != nil { - return err - } - - err = ioutil.WriteFile(archiveName, buf.Bytes(), defaultFilePermissions) - if err != nil { - return err - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/internals_xmlnode.go b/Godeps/_workspace/src/github.com/cihub/seelog/internals_xmlnode.go deleted file mode 100644 index 98588493..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/internals_xmlnode.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "encoding/xml" - "errors" - "fmt" - "io" - "strings" -) - -type xmlNode struct { - name string - attributes map[string]string - children []*xmlNode - value string -} - -func newNode() *xmlNode { - node := new(xmlNode) - node.children = make([]*xmlNode, 0) - node.attributes = make(map[string]string) - return node -} - -func (node *xmlNode) String() string { - str := fmt.Sprintf("<%s", node.name) - - for attrName, attrVal := range node.attributes { - str += fmt.Sprintf(" %s=\"%s\"", attrName, attrVal) - } - - str += ">" - str += node.value - - if len(node.children) != 0 { - for _, child := range node.children { - str += fmt.Sprintf("%s", child) - } - } - - str += fmt.Sprintf("", node.name) - - return str -} - -func (node *xmlNode) unmarshal(startEl xml.StartElement) error { - node.name = startEl.Name.Local - - for _, v := range startEl.Attr { - _, alreadyExists := node.attributes[v.Name.Local] - if alreadyExists { - return errors.New("tag '" + node.name + "' has duplicated attribute: '" + v.Name.Local + "'") - } - node.attributes[v.Name.Local] = v.Value - } - - return nil -} - -func (node *xmlNode) add(child *xmlNode) { - if node.children == nil { - node.children = make([]*xmlNode, 0) - } - - node.children = append(node.children, child) -} - -func (node *xmlNode) hasChildren() bool { - return node.children != nil && len(node.children) > 0 -} - -//============================================= - -func unmarshalConfig(reader io.Reader) (*xmlNode, error) { - xmlParser := xml.NewDecoder(reader) - - config, err := unmarshalNode(xmlParser, nil) - if err != nil { - return nil, err - } - if config == nil { - return nil, errors.New("xml has no content") - } - - nextConfigEntry, err := unmarshalNode(xmlParser, nil) - if nextConfigEntry != nil { - return nil, errors.New("xml contains more than one root element") - } - - return config, nil -} - -func unmarshalNode(xmlParser *xml.Decoder, curToken xml.Token) (node *xmlNode, err error) { - firstLoop 
:= true - for { - var tok xml.Token - if firstLoop && curToken != nil { - tok = curToken - firstLoop = false - } else { - tok, err = getNextToken(xmlParser) - if err != nil || tok == nil { - return - } - } - - switch tt := tok.(type) { - case xml.SyntaxError: - err = errors.New(tt.Error()) - return - case xml.CharData: - value := strings.TrimSpace(string([]byte(tt))) - if node != nil { - node.value += value - } - case xml.StartElement: - if node == nil { - node = newNode() - err := node.unmarshal(tt) - if err != nil { - return nil, err - } - } else { - childNode, childErr := unmarshalNode(xmlParser, tok) - if childErr != nil { - return nil, childErr - } - - if childNode != nil { - node.add(childNode) - } else { - return - } - } - case xml.EndElement: - return - } - } -} - -func getNextToken(xmlParser *xml.Decoder) (tok xml.Token, err error) { - if tok, err = xmlParser.Token(); err != nil { - if err == io.EOF { - err = nil - return - } - return - } - - return -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/internals_xmlnode_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/internals_xmlnode_test.go deleted file mode 100644 index 3a4487ce..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/internals_xmlnode_test.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "strings" - "testing" - //"fmt" - "reflect" -) - -var testEnv *testing.T - -/*func TestWrapper(t *testing.T) { - testEnv = t - - s := "" - reader := strings.NewReader(s) - config, err := unmarshalConfig(reader) - if err != nil { - testEnv.Error(err) - return - } - - printXML(config, 0) -} - -func printXML(node *xmlNode, level int) { - indent := strings.Repeat("\t", level) - fmt.Print(indent + node.name) - for key, value := range node.attributes { - fmt.Print(" " + key + "/" + value) - } - fmt.Println() - - for _, child := range node.children { - printXML(child, level+1) - } -}*/ - -var xmlNodeTests []xmlNodeTest - -type xmlNodeTest struct { - testName string - inputXML string - expected interface{} - errorExpected bool -} - -func getXMLTests() []xmlNodeTest { - if xmlNodeTests == nil { - xmlNodeTests = make([]xmlNodeTest, 0) - - testName := "Simple test" - testXML := `` - testExpected := newNode() - testExpected.name = "a" - xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) - - testName = "Multiline test" - testXML = - ` - - -` - testExpected = newNode() - testExpected.name = "a" - xmlNodeTests = 
append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) - - testName = "Multiline test #2" - testXML = - ` - - - - - - -` - testExpected = newNode() - testExpected.name = "a" - xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) - - testName = "Incorrect names" - testXML = `< a >< /a >` - xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, nil, true}) - - testName = "Comments" - testXML = - ` - - -` - testExpected = newNode() - testExpected.name = "a" - xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) - - testName = "Multiple roots" - testXML = `` - xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, nil, true}) - - testName = "Multiple roots + incorrect xml" - testXML = `` - xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, nil, true}) - - testName = "Some unicode and data" - testXML = `<俄语>данные` - testExpected = newNode() - testExpected.name = "俄语" - testExpected.value = "данные" - xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) - - testName = "Values and children" - testXML = `<俄语>данные` - testExpected = newNode() - testExpected.name = "俄语" - testExpected.value = "данные" - child := newNode() - child.name = "and_a_child" - testExpected.children = append(testExpected.children, child) - xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) - - testName = "Just children" - testXML = `<俄语>` - testExpected = newNode() - testExpected.name = "俄语" - child = newNode() - child.name = "and_a_child" - testExpected.children = append(testExpected.children, child) - xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) - - testName = "Mixed test" - testXML = `<俄语 a="1" b="2.13" c="abc">` - testExpected = newNode() - testExpected.name = "俄语" - testExpected.attributes["a"] = "1" - testExpected.attributes["b"] = "2.13" - 
testExpected.attributes["c"] = "abc" - child = newNode() - child.name = "child" - child.attributes["abc"] = "bca" - testExpected.children = append(testExpected.children, child) - child = newNode() - child.name = "child" - child.attributes["abc"] = "def" - testExpected.children = append(testExpected.children, child) - xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) - } - - return xmlNodeTests -} - -func TestXmlNode(t *testing.T) { - - for _, test := range getXMLTests() { - - reader := strings.NewReader(test.inputXML) - parsedXML, err := unmarshalConfig(reader) - - if (err != nil) != test.errorExpected { - t.Errorf("\n%s:\nXML input: %s\nExpected error:%t. Got error: %t\n", test.testName, - test.inputXML, test.errorExpected, (err != nil)) - if err != nil { - t.Logf("%s\n", err.Error()) - } - continue - } - - if err == nil && !reflect.DeepEqual(parsedXML, test.expected) { - t.Errorf("\n%s:\nXML input: %s\nExpected: %s. \nGot: %s\n", test.testName, - test.inputXML, test.expected, parsedXML) - } - } -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/log.go b/Godeps/_workspace/src/github.com/cihub/seelog/log.go deleted file mode 100644 index f775e1fd..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/log.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "errors" - "fmt" - "sync" - "time" -) - -const ( - staticFuncCallDepth = 3 // See 'commonLogger.log' method comments - loggerFuncCallDepth = 3 -) - -// Current is the logger used in all package level convenience funcs like 'Trace', 'Debug', 'Flush', etc. -var Current LoggerInterface - -// Default logger that is created from an empty config: "". It is not closed by a ReplaceLogger call. -var Default LoggerInterface - -// Disabled logger that doesn't produce any output in any circumstances. It is neither closed nor flushed by a ReplaceLogger call. -var Disabled LoggerInterface - -var pkgOperationsMutex *sync.Mutex - -func init() { - pkgOperationsMutex = new(sync.Mutex) - var err error - - if Default == nil { - Default, err = LoggerFromConfigAsBytes([]byte("")) - } - - if Disabled == nil { - Disabled, err = LoggerFromConfigAsBytes([]byte("")) - } - - if err != nil { - panic(fmt.Sprintf("Seelog couldn't start. 
Error: %s", err.Error())) - } - - Current = Default -} - -func createLoggerFromFullConfig(config *configForParsing) (LoggerInterface, error) { - if config.LogType == syncloggerTypeFromString { - return NewSyncLogger(&config.logConfig), nil - } else if config.LogType == asyncLooploggerTypeFromString { - return NewAsyncLoopLogger(&config.logConfig), nil - } else if config.LogType == asyncTimerloggerTypeFromString { - logData := config.LoggerData - if logData == nil { - return nil, errors.New("async timer data not set") - } - - asyncInt, ok := logData.(asyncTimerLoggerData) - if !ok { - return nil, errors.New("invalid async timer data") - } - - logger, err := NewAsyncTimerLogger(&config.logConfig, time.Duration(asyncInt.AsyncInterval)) - if !ok { - return nil, err - } - - return logger, nil - } else if config.LogType == adaptiveLoggerTypeFromString { - logData := config.LoggerData - if logData == nil { - return nil, errors.New("adaptive logger parameters not set") - } - - adaptData, ok := logData.(adaptiveLoggerData) - if !ok { - return nil, errors.New("invalid adaptive logger parameters") - } - - logger, err := NewAsyncAdaptiveLogger( - &config.logConfig, - time.Duration(adaptData.MinInterval), - time.Duration(adaptData.MaxInterval), - adaptData.CriticalMsgCount, - ) - if err != nil { - return nil, err - } - - return logger, nil - } - return nil, errors.New("invalid config log type/data") -} - -// UseLogger sets the 'Current' package level logger variable to the specified value. -// This variable is used in all Trace/Debug/... package level convenience funcs. -// -// Example: -// -// after calling -// seelog.UseLogger(somelogger) -// the following: -// seelog.Debug("abc") -// will be equal to -// somelogger.Debug("abc") -// -// IMPORTANT: UseLogger do NOT close the previous logger (only flushes it). So if -// you constantly use it to replace loggers and don't close them in other code, you'll -// end up having memory leaks. 
-// -// To safely replace loggers, use ReplaceLogger. -func UseLogger(logger LoggerInterface) error { - if logger == nil { - return errors.New("logger can not be nil") - } - - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - - oldLogger := Current - Current = logger - - if oldLogger != nil { - oldLogger.Flush() - } - - return nil -} - -// ReplaceLogger acts as UseLogger but the logger that was previously -// used is disposed (except Default and Disabled loggers). -// -// Example: -// import log "github.com/cihub/seelog" -// -// func main() { -// logger, err := log.LoggerFromConfigAsFile("seelog.xml") -// -// if err != nil { -// panic(err) -// } -// -// log.ReplaceLogger(logger) -// defer log.Flush() -// -// log.Trace("test") -// log.Debugf("var = %s", "abc") -// } -func ReplaceLogger(logger LoggerInterface) error { - if logger == nil { - return errors.New("logger can not be nil") - } - - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - - defer func() { - if err := recover(); err != nil { - reportInternalError(fmt.Errorf("recovered from panic during ReplaceLogger: %s", err)) - } - }() - - if Current == Default { - Current.Flush() - } else if Current != nil && !Current.Closed() && Current != Disabled { - Current.Flush() - Current.Close() - } - - Current = logger - - return nil -} - -// Tracef formats message according to format specifier -// and writes to default logger with log level = Trace. -func Tracef(format string, params ...interface{}) { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - Current.traceWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) -} - -// Debugf formats message according to format specifier -// and writes to default logger with log level = Debug. 
-func Debugf(format string, params ...interface{}) { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - Current.debugWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) -} - -// Infof formats message according to format specifier -// and writes to default logger with log level = Info. -func Infof(format string, params ...interface{}) { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - Current.infoWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) -} - -// Warnf formats message according to format specifier and writes to default logger with log level = Warn -func Warnf(format string, params ...interface{}) error { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - message := newLogFormattedMessage(format, params) - Current.warnWithCallDepth(staticFuncCallDepth, message) - return errors.New(message.String()) -} - -// Errorf formats message according to format specifier and writes to default logger with log level = Error -func Errorf(format string, params ...interface{}) error { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - message := newLogFormattedMessage(format, params) - Current.errorWithCallDepth(staticFuncCallDepth, message) - return errors.New(message.String()) -} - -// Criticalf formats message according to format specifier and writes to default logger with log level = Critical -func Criticalf(format string, params ...interface{}) error { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - message := newLogFormattedMessage(format, params) - Current.criticalWithCallDepth(staticFuncCallDepth, message) - return errors.New(message.String()) -} - -// Trace formats message using the default formats for its operands and writes to default logger with log level = Trace -func Trace(v ...interface{}) { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - Current.traceWithCallDepth(staticFuncCallDepth, newLogMessage(v)) -} - -// Debug 
formats message using the default formats for its operands and writes to default logger with log level = Debug -func Debug(v ...interface{}) { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - Current.debugWithCallDepth(staticFuncCallDepth, newLogMessage(v)) -} - -// Info formats message using the default formats for its operands and writes to default logger with log level = Info -func Info(v ...interface{}) { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - Current.infoWithCallDepth(staticFuncCallDepth, newLogMessage(v)) -} - -// Warn formats message using the default formats for its operands and writes to default logger with log level = Warn -func Warn(v ...interface{}) error { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - message := newLogMessage(v) - Current.warnWithCallDepth(staticFuncCallDepth, message) - return errors.New(message.String()) -} - -// Error formats message using the default formats for its operands and writes to default logger with log level = Error -func Error(v ...interface{}) error { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - message := newLogMessage(v) - Current.errorWithCallDepth(staticFuncCallDepth, message) - return errors.New(message.String()) -} - -// Critical formats message using the default formats for its operands and writes to default logger with log level = Critical -func Critical(v ...interface{}) error { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - message := newLogMessage(v) - Current.criticalWithCallDepth(staticFuncCallDepth, message) - return errors.New(message.String()) -} - -// Flush immediately processes all currently queued messages and all currently buffered messages. -// It is a blocking call which returns only after the queue is empty and all the buffers are empty. -// -// If Flush is called for a synchronous logger (type='sync'), it only flushes buffers (e.g. '' receivers) -// , because there is no queue. 
-// -// Call this method when your app is going to shut down not to lose any log messages. -func Flush() { - pkgOperationsMutex.Lock() - defer pkgOperationsMutex.Unlock() - Current.Flush() -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/logger.go b/Godeps/_workspace/src/github.com/cihub/seelog/logger.go deleted file mode 100644 index fc96aed4..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/logger.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "errors" - "fmt" - "os" - "sync" -) - -func reportInternalError(err error) { - fmt.Fprintf(os.Stderr, "seelog internal error: %s\n", err) -} - -// LoggerInterface represents structs capable of logging Seelog messages -type LoggerInterface interface { - - // Tracef formats message according to format specifier - // and writes to log with level = Trace. - Tracef(format string, params ...interface{}) - - // Debugf formats message according to format specifier - // and writes to log with level = Debug. - Debugf(format string, params ...interface{}) - - // Infof formats message according to format specifier - // and writes to log with level = Info. - Infof(format string, params ...interface{}) - - // Warnf formats message according to format specifier - // and writes to log with level = Warn. - Warnf(format string, params ...interface{}) error - - // Errorf formats message according to format specifier - // and writes to log with level = Error. - Errorf(format string, params ...interface{}) error - - // Criticalf formats message according to format specifier - // and writes to log with level = Critical. 
- Criticalf(format string, params ...interface{}) error - - // Trace formats message using the default formats for its operands - // and writes to log with level = Trace - Trace(v ...interface{}) - - // Debug formats message using the default formats for its operands - // and writes to log with level = Debug - Debug(v ...interface{}) - - // Info formats message using the default formats for its operands - // and writes to log with level = Info - Info(v ...interface{}) - - // Warn formats message using the default formats for its operands - // and writes to log with level = Warn - Warn(v ...interface{}) error - - // Error formats message using the default formats for its operands - // and writes to log with level = Error - Error(v ...interface{}) error - - // Critical formats message using the default formats for its operands - // and writes to log with level = Critical - Critical(v ...interface{}) error - - traceWithCallDepth(callDepth int, message fmt.Stringer) - debugWithCallDepth(callDepth int, message fmt.Stringer) - infoWithCallDepth(callDepth int, message fmt.Stringer) - warnWithCallDepth(callDepth int, message fmt.Stringer) - errorWithCallDepth(callDepth int, message fmt.Stringer) - criticalWithCallDepth(callDepth int, message fmt.Stringer) - - // Close flushes all the messages in the logger and closes it. It cannot be used after this operation. - Close() - - // Flush flushes all the messages in the logger. - Flush() - - // Closed returns true if the logger was previously closed. - Closed() bool - - // SetAdditionalStackDepth sets the additional number of frames to skip by runtime.Caller - // when getting function information needed to print seelog format identifiers such as %Func or %File. - // - // This func may be used when you wrap seelog funcs and want to print caller info of you own - // wrappers instead of seelog func callers. In this case you should set depth = 1. If you then - // wrap your wrapper, you should set depth = 2, etc. 
- // - // NOTE: Incorrect depth value may lead to errors in runtime.Caller evaluation or incorrect - // function/file names in log files. Do not use it if you are not going to wrap seelog funcs. - // You may reset the value to default using a SetAdditionalStackDepth(0) call. - SetAdditionalStackDepth(depth int) error - - // Sets logger context that can be used in formatter funcs and custom receivers - SetContext(context interface{}) -} - -// innerLoggerInterface is an internal logging interface -type innerLoggerInterface interface { - innerLog(level LogLevel, context LogContextInterface, message fmt.Stringer) - Flush() -} - -// [file path][func name][level] -> [allowed] -type allowedContextCache map[string]map[string]map[LogLevel]bool - -// commonLogger contains all common data needed for logging and contains methods used to log messages. -type commonLogger struct { - config *logConfig // Config used for logging - contextCache allowedContextCache // Caches whether log is enabled for specific "full path-func name-level" sets - closed bool // 'true' when all writers are closed, all data is flushed, logger is unusable. 
Must be accessed while holding closedM - closedM sync.RWMutex - m sync.Mutex // Mutex for main operations - unusedLevels []bool - innerLogger innerLoggerInterface - addStackDepth int // Additional stack depth needed for correct seelog caller context detection - customContext interface{} -} - -func newCommonLogger(config *logConfig, internalLogger innerLoggerInterface) *commonLogger { - cLogger := new(commonLogger) - - cLogger.config = config - cLogger.contextCache = make(allowedContextCache) - cLogger.unusedLevels = make([]bool, Off) - cLogger.fillUnusedLevels() - cLogger.innerLogger = internalLogger - - return cLogger -} - -func (cLogger *commonLogger) SetAdditionalStackDepth(depth int) error { - if depth < 0 { - return fmt.Errorf("negative depth: %d", depth) - } - cLogger.m.Lock() - cLogger.addStackDepth = depth - cLogger.m.Unlock() - return nil -} - -func (cLogger *commonLogger) Tracef(format string, params ...interface{}) { - cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) -} - -func (cLogger *commonLogger) Debugf(format string, params ...interface{}) { - cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) -} - -func (cLogger *commonLogger) Infof(format string, params ...interface{}) { - cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) -} - -func (cLogger *commonLogger) Warnf(format string, params ...interface{}) error { - message := newLogFormattedMessage(format, params) - cLogger.warnWithCallDepth(loggerFuncCallDepth, message) - return errors.New(message.String()) -} - -func (cLogger *commonLogger) Errorf(format string, params ...interface{}) error { - message := newLogFormattedMessage(format, params) - cLogger.errorWithCallDepth(loggerFuncCallDepth, message) - return errors.New(message.String()) -} - -func (cLogger *commonLogger) Criticalf(format string, params ...interface{}) error { - message := newLogFormattedMessage(format, params) - 
cLogger.criticalWithCallDepth(loggerFuncCallDepth, message) - return errors.New(message.String()) -} - -func (cLogger *commonLogger) Trace(v ...interface{}) { - cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) -} - -func (cLogger *commonLogger) Debug(v ...interface{}) { - cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) -} - -func (cLogger *commonLogger) Info(v ...interface{}) { - cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) -} - -func (cLogger *commonLogger) Warn(v ...interface{}) error { - message := newLogMessage(v) - cLogger.warnWithCallDepth(loggerFuncCallDepth, message) - return errors.New(message.String()) -} - -func (cLogger *commonLogger) Error(v ...interface{}) error { - message := newLogMessage(v) - cLogger.errorWithCallDepth(loggerFuncCallDepth, message) - return errors.New(message.String()) -} - -func (cLogger *commonLogger) Critical(v ...interface{}) error { - message := newLogMessage(v) - cLogger.criticalWithCallDepth(loggerFuncCallDepth, message) - return errors.New(message.String()) -} - -func (cLogger *commonLogger) SetContext(c interface{}) { - cLogger.customContext = c -} - -func (cLogger *commonLogger) traceWithCallDepth(callDepth int, message fmt.Stringer) { - cLogger.log(TraceLvl, message, callDepth) -} - -func (cLogger *commonLogger) debugWithCallDepth(callDepth int, message fmt.Stringer) { - cLogger.log(DebugLvl, message, callDepth) -} - -func (cLogger *commonLogger) infoWithCallDepth(callDepth int, message fmt.Stringer) { - cLogger.log(InfoLvl, message, callDepth) -} - -func (cLogger *commonLogger) warnWithCallDepth(callDepth int, message fmt.Stringer) { - cLogger.log(WarnLvl, message, callDepth) -} - -func (cLogger *commonLogger) errorWithCallDepth(callDepth int, message fmt.Stringer) { - cLogger.log(ErrorLvl, message, callDepth) -} - -func (cLogger *commonLogger) criticalWithCallDepth(callDepth int, message fmt.Stringer) { - cLogger.log(CriticalLvl, message, callDepth) - 
cLogger.innerLogger.Flush() -} - -func (cLogger *commonLogger) Closed() bool { - cLogger.closedM.RLock() - defer cLogger.closedM.RUnlock() - return cLogger.closed -} - -func (cLogger *commonLogger) fillUnusedLevels() { - for i := 0; i < len(cLogger.unusedLevels); i++ { - cLogger.unusedLevels[i] = true - } - - cLogger.fillUnusedLevelsByContraint(cLogger.config.Constraints) - - for _, exception := range cLogger.config.Exceptions { - cLogger.fillUnusedLevelsByContraint(exception) - } -} - -func (cLogger *commonLogger) fillUnusedLevelsByContraint(constraint logLevelConstraints) { - for i := 0; i < len(cLogger.unusedLevels); i++ { - if constraint.IsAllowed(LogLevel(i)) { - cLogger.unusedLevels[i] = false - } - } -} - -// stackCallDepth is used to indicate the call depth of 'log' func. -// This depth level is used in the runtime.Caller(...) call. See -// common_context.go -> specifyContext, extractCallerInfo for details. -func (cLogger *commonLogger) log(level LogLevel, message fmt.Stringer, stackCallDepth int) { - if cLogger.unusedLevels[level] { - return - } - cLogger.m.Lock() - defer cLogger.m.Unlock() - - if cLogger.Closed() { - return - } - context, _ := specifyContext(stackCallDepth+cLogger.addStackDepth, cLogger.customContext) - // Context errors are not reported because there are situations - // in which context errors are normal Seelog usage cases. For - // example in executables with stripped symbols. - // Error contexts are returned instead. See common_context.go. 
- /*if err != nil { - reportInternalError(err) - return - }*/ - cLogger.innerLogger.innerLog(level, context, message) -} - -func (cLogger *commonLogger) processLogMsg(level LogLevel, message fmt.Stringer, context LogContextInterface) { - defer func() { - if err := recover(); err != nil { - reportInternalError(fmt.Errorf("recovered from panic during message processing: %s", err)) - } - }() - if cLogger.config.IsAllowed(level, context) { - cLogger.config.RootDispatcher.Dispatch(message.String(), level, context, reportInternalError) - } -} - -func (cLogger *commonLogger) isAllowed(level LogLevel, context LogContextInterface) bool { - funcMap, ok := cLogger.contextCache[context.FullPath()] - if !ok { - funcMap = make(map[string]map[LogLevel]bool, 0) - cLogger.contextCache[context.FullPath()] = funcMap - } - - levelMap, ok := funcMap[context.Func()] - if !ok { - levelMap = make(map[LogLevel]bool, 0) - funcMap[context.Func()] = levelMap - } - - isAllowValue, ok := levelMap[level] - if !ok { - isAllowValue = cLogger.config.IsAllowed(level, context) - levelMap[level] = isAllowValue - } - - return isAllowValue -} - -type logMessage struct { - params []interface{} -} - -type logFormattedMessage struct { - format string - params []interface{} -} - -func newLogMessage(params []interface{}) fmt.Stringer { - message := new(logMessage) - - message.params = params - - return message -} - -func newLogFormattedMessage(format string, params []interface{}) *logFormattedMessage { - message := new(logFormattedMessage) - - message.params = params - message.format = format - - return message -} - -func (message *logMessage) String() string { - return fmt.Sprint(message.params...) -} - -func (message *logFormattedMessage) String() string { - return fmt.Sprintf(message.format, message.params...) 
-} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/writers_bufferedwriter.go b/Godeps/_workspace/src/github.com/cihub/seelog/writers_bufferedwriter.go deleted file mode 100644 index 37d75c82..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/writers_bufferedwriter.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "bufio" - "errors" - "fmt" - "io" - "sync" - "time" -) - -// bufferedWriter stores data in memory and flushes it every flushPeriod or when buffer is full -type bufferedWriter struct { - flushPeriod time.Duration // data flushes interval (in microseconds) - bufferMutex *sync.Mutex // mutex for buffer operations syncronization - innerWriter io.Writer // inner writer - buffer *bufio.Writer // buffered wrapper for inner writer - bufferSize int // max size of data chunk in bytes -} - -// NewBufferedWriter creates a new buffered writer struct. -// bufferSize -- size of memory buffer in bytes -// flushPeriod -- period in which data flushes from memory buffer in milliseconds. 0 - turn off this functionality -func NewBufferedWriter(innerWriter io.Writer, bufferSize int, flushPeriod time.Duration) (*bufferedWriter, error) { - - if innerWriter == nil { - return nil, errors.New("argument is nil: innerWriter") - } - if flushPeriod < 0 { - return nil, fmt.Errorf("flushPeriod can not be less than 0. Got: %d", flushPeriod) - } - - if bufferSize <= 0 { - return nil, fmt.Errorf("bufferSize can not be less or equal to 0. Got: %d", bufferSize) - } - - buffer := bufio.NewWriterSize(innerWriter, bufferSize) - - /*if err != nil { - return nil, err - }*/ - - newWriter := new(bufferedWriter) - - newWriter.innerWriter = innerWriter - newWriter.buffer = buffer - newWriter.bufferSize = bufferSize - newWriter.flushPeriod = flushPeriod * 1e6 - newWriter.bufferMutex = new(sync.Mutex) - - if flushPeriod != 0 { - go newWriter.flushPeriodically() - } - - return newWriter, nil -} - -func (bufWriter *bufferedWriter) writeBigChunk(bytes []byte) (n int, err error) { - bufferedLen := bufWriter.buffer.Buffered() - - n, err = bufWriter.flushInner() - if err != nil { - return - } - - written, writeErr := bufWriter.innerWriter.Write(bytes) - return bufferedLen + written, writeErr -} - -// Sends data to buffer manager. Waits until all buffers are full. 
-func (bufWriter *bufferedWriter) Write(bytes []byte) (n int, err error) { - - bufWriter.bufferMutex.Lock() - defer bufWriter.bufferMutex.Unlock() - - bytesLen := len(bytes) - - if bytesLen > bufWriter.bufferSize { - return bufWriter.writeBigChunk(bytes) - } - - if bytesLen > bufWriter.buffer.Available() { - n, err = bufWriter.flushInner() - if err != nil { - return - } - } - - bufWriter.buffer.Write(bytes) - - return len(bytes), nil -} - -func (bufWriter *bufferedWriter) Close() error { - closer, ok := bufWriter.innerWriter.(io.Closer) - if ok { - return closer.Close() - } - - return nil -} - -func (bufWriter *bufferedWriter) Flush() { - - bufWriter.bufferMutex.Lock() - defer bufWriter.bufferMutex.Unlock() - - bufWriter.flushInner() -} - -func (bufWriter *bufferedWriter) flushInner() (n int, err error) { - bufferedLen := bufWriter.buffer.Buffered() - flushErr := bufWriter.buffer.Flush() - - return bufWriter.buffer.Buffered() - bufferedLen, flushErr -} - -func (bufWriter *bufferedWriter) flushBuffer() { - bufWriter.bufferMutex.Lock() - defer bufWriter.bufferMutex.Unlock() - - bufWriter.buffer.Flush() -} - -func (bufWriter *bufferedWriter) flushPeriodically() { - if bufWriter.flushPeriod > 0 { - ticker := time.NewTicker(bufWriter.flushPeriod) - for { - <-ticker.C - bufWriter.flushBuffer() - } - } -} - -func (bufWriter *bufferedWriter) String() string { - return fmt.Sprintf("bufferedWriter size: %d, flushPeriod: %d", bufWriter.bufferSize, bufWriter.flushPeriod) -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/writers_bufferedwriter_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/writers_bufferedwriter_test.go deleted file mode 100644 index 03f74f70..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/writers_bufferedwriter_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "testing" -) - -func TestChunkWriteOnFilling(t *testing.T) { - writer, _ := newBytesVerifier(t) - bufferedWriter, err := NewBufferedWriter(writer, 1024, 0) - - if err != nil { - t.Fatalf("Unexpected buffered writer creation error: %s", err.Error()) - } - - bytes := make([]byte, 1000) - - bufferedWriter.Write(bytes) - writer.ExpectBytes(bytes) - bufferedWriter.Write(bytes) -} - -func TestFlushByTimePeriod(t *testing.T) { - writer, _ := newBytesVerifier(t) - bufferedWriter, err := NewBufferedWriter(writer, 1024, 10) - - if err != nil { - t.Fatalf("Unexpected buffered writer creation error: %s", err.Error()) - } - - bytes := []byte("Hello") - - for i := 0; i < 2; i++ { - writer.ExpectBytes(bytes) - bufferedWriter.Write(bytes) - } -} - -func TestBigMessageMustPassMemoryBuffer(t *testing.T) { - writer, _ := newBytesVerifier(t) - bufferedWriter, err := NewBufferedWriter(writer, 1024, 0) - - if err != nil { - t.Fatalf("Unexpected buffered writer creation error: %s", err.Error()) - } - - bytes := make([]byte, 5000) - - for i := 0; i < len(bytes); i++ { - bytes[i] = uint8(i % 255) - } - - writer.ExpectBytes(bytes) - bufferedWriter.Write(bytes) -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/writers_connwriter.go b/Godeps/_workspace/src/github.com/cihub/seelog/writers_connwriter.go deleted file mode 100644 index d199894e..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/writers_connwriter.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. 
Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "crypto/tls" - "fmt" - "io" - "net" -) - -// connWriter is used to write to a stream-oriented network connection. -type connWriter struct { - innerWriter io.WriteCloser - reconnectOnMsg bool - reconnect bool - net string - addr string - useTLS bool - configTLS *tls.Config -} - -// Creates writer to the address addr on the network netName. 
-// Connection will be opened on each write if reconnectOnMsg = true -func NewConnWriter(netName string, addr string, reconnectOnMsg bool) *connWriter { - newWriter := new(connWriter) - - newWriter.net = netName - newWriter.addr = addr - newWriter.reconnectOnMsg = reconnectOnMsg - - return newWriter -} - -// Creates a writer that uses SSL/TLS -func newTLSWriter(netName string, addr string, reconnectOnMsg bool, config *tls.Config) *connWriter { - newWriter := new(connWriter) - - newWriter.net = netName - newWriter.addr = addr - newWriter.reconnectOnMsg = reconnectOnMsg - newWriter.useTLS = true - newWriter.configTLS = config - - return newWriter -} - -func (connWriter *connWriter) Close() error { - if connWriter.innerWriter == nil { - return nil - } - - return connWriter.innerWriter.Close() -} - -func (connWriter *connWriter) Write(bytes []byte) (n int, err error) { - if connWriter.neededConnectOnMsg() { - err = connWriter.connect() - if err != nil { - return 0, err - } - } - - if connWriter.reconnectOnMsg { - defer connWriter.innerWriter.Close() - } - - n, err = connWriter.innerWriter.Write(bytes) - if err != nil { - connWriter.reconnect = true - } - - return -} - -func (connWriter *connWriter) String() string { - return fmt.Sprintf("Conn writer: [%s, %s, %v]", connWriter.net, connWriter.addr, connWriter.reconnectOnMsg) -} - -func (connWriter *connWriter) connect() error { - if connWriter.innerWriter != nil { - connWriter.innerWriter.Close() - connWriter.innerWriter = nil - } - - if connWriter.useTLS { - conn, err := tls.Dial(connWriter.net, connWriter.addr, connWriter.configTLS) - if err != nil { - return err - } - connWriter.innerWriter = conn - - return nil - } - - conn, err := net.Dial(connWriter.net, connWriter.addr) - if err != nil { - return err - } - - tcpConn, ok := conn.(*net.TCPConn) - if ok { - tcpConn.SetKeepAlive(true) - } - - connWriter.innerWriter = conn - - return nil -} - -func (connWriter *connWriter) neededConnectOnMsg() bool { - if 
connWriter.reconnect { - connWriter.reconnect = false - return true - } - - if connWriter.innerWriter == nil { - return true - } - - return connWriter.reconnectOnMsg -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/writers_consolewriter.go b/Godeps/_workspace/src/github.com/cihub/seelog/writers_consolewriter.go deleted file mode 100644 index 3eb79afa..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/writers_consolewriter.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import "fmt" - -// consoleWriter is used to write to console -type consoleWriter struct { -} - -// Creates a new console writer. Returns error, if the console writer couldn't be created. -func NewConsoleWriter() (writer *consoleWriter, err error) { - newWriter := new(consoleWriter) - - return newWriter, nil -} - -// Create folder and file on WriteLog/Write first call -func (console *consoleWriter) Write(bytes []byte) (int, error) { - return fmt.Print(string(bytes)) -} - -func (console *consoleWriter) String() string { - return "Console writer" -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/writers_filewriter.go b/Godeps/_workspace/src/github.com/cihub/seelog/writers_filewriter.go deleted file mode 100644 index 8d3ae270..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/writers_filewriter.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "fmt" - "io" - "os" - "path/filepath" -) - -// fileWriter is used to write to a file. -type fileWriter struct { - innerWriter io.WriteCloser - fileName string -} - -// Creates a new file and a corresponding writer. Returns error, if the file couldn't be created. -func NewFileWriter(fileName string) (writer *fileWriter, err error) { - newWriter := new(fileWriter) - newWriter.fileName = fileName - - return newWriter, nil -} - -func (fw *fileWriter) Close() error { - if fw.innerWriter != nil { - err := fw.innerWriter.Close() - if err != nil { - return err - } - fw.innerWriter = nil - } - return nil -} - -// Create folder and file on WriteLog/Write first call -func (fw *fileWriter) Write(bytes []byte) (n int, err error) { - if fw.innerWriter == nil { - if err := fw.createFile(); err != nil { - return 0, err - } - } - return fw.innerWriter.Write(bytes) -} - -func (fw *fileWriter) createFile() error { - folder, _ := filepath.Split(fw.fileName) - var err error - - if 0 != len(folder) { - err = os.MkdirAll(folder, defaultDirectoryPermissions) - if err != nil { - return err - } - } - - // If exists - fw.innerWriter, err = os.OpenFile(fw.fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, defaultFilePermissions) - - if err != nil { - return err - } - - return nil -} - -func (fw *fileWriter) String() string { - return fmt.Sprintf("File writer: %s", fw.fileName) -} diff --git 
a/Godeps/_workspace/src/github.com/cihub/seelog/writers_filewriter_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/writers_filewriter_test.go deleted file mode 100644 index 1893be88..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/writers_filewriter_test.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "fmt" - "io" - "os" - "path/filepath" - "strings" - "testing" -) - -const ( - messageLen = 10 -) - -var bytesFileTest = []byte(strings.Repeat("A", messageLen)) - -func TestSimpleFileWriter(t *testing.T) { - t.Logf("Starting file writer tests") - NewFileWriterTester(simplefileWriterTests, simplefileWriterGetter, t).test() -} - -//=============================================================== - -func simplefileWriterGetter(testCase *fileWriterTestCase) (io.WriteCloser, error) { - return NewFileWriter(testCase.fileName) -} - -//=============================================================== -type fileWriterTestCase struct { - files []string - fileName string - rollingType rollingType - fileSize int64 - maxRolls int - datePattern string - writeCount int - resFiles []string - nameMode rollingNameMode -} - -func createSimplefileWriterTestCase(fileName string, writeCount int) *fileWriterTestCase { - return &fileWriterTestCase{[]string{}, fileName, rollingTypeSize, 0, 0, "", writeCount, []string{fileName}, 0} -} - -var simplefileWriterTests = []*fileWriterTestCase{ - createSimplefileWriterTestCase("log.testlog", 1), - createSimplefileWriterTestCase("log.testlog", 50), - createSimplefileWriterTestCase(filepath.Join("dir", "log.testlog"), 50), -} - -//=============================================================== - -type fileWriterTester struct { - testCases []*fileWriterTestCase - writerGetter func(*fileWriterTestCase) (io.WriteCloser, error) - t *testing.T -} - -func NewFileWriterTester( - testCases []*fileWriterTestCase, - writerGetter func(*fileWriterTestCase) (io.WriteCloser, error), - t *testing.T) *fileWriterTester { - - return &fileWriterTester{testCases, writerGetter, t} -} - -func isWriterTestFile(fn string) bool { - return strings.Contains(fn, ".testlog") -} - -func cleanupWriterTest(t *testing.T) { - toDel, err := getDirFilePaths(".", isWriterTestFile, true) - if nil != err { - t.Fatal("Cannot list files in test directory!") - } 
- - for _, p := range toDel { - if err = tryRemoveFile(p); nil != err { - t.Errorf("cannot remove file %s in test directory: %s", p, err.Error()) - } - } - - if err = os.RemoveAll("dir"); nil != err { - t.Errorf("cannot remove temp test directory: %s", err.Error()) - } -} - -func getWriterTestResultFiles() ([]string, error) { - var p []string - - visit := func(path string, f os.FileInfo, err error) error { - if !f.IsDir() && isWriterTestFile(path) { - abs, err := filepath.Abs(path) - if err != nil { - return fmt.Errorf("filepath.Abs failed for %s", path) - } - - p = append(p, abs) - } - - return nil - } - - err := filepath.Walk(".", visit) - if nil != err { - return nil, err - } - - return p, nil -} - -func (tester *fileWriterTester) testCase(testCase *fileWriterTestCase, testNum int) { - defer cleanupWriterTest(tester.t) - - tester.t.Logf("Start test [%v]\n", testNum) - - for _, filePath := range testCase.files { - dir, _ := filepath.Split(filePath) - - var err error - - if 0 != len(dir) { - err = os.MkdirAll(dir, defaultDirectoryPermissions) - if err != nil { - tester.t.Error(err) - return - } - } - - fi, err := os.Create(filePath) - if err != nil { - tester.t.Error(err) - return - } - - err = fi.Close() - if err != nil { - tester.t.Error(err) - return - } - } - - fwc, err := tester.writerGetter(testCase) - if err != nil { - tester.t.Error(err) - return - } - defer fwc.Close() - - tester.performWrite(fwc, testCase.writeCount) - - files, err := getWriterTestResultFiles() - if err != nil { - tester.t.Error(err) - return - } - - tester.checkRequiredFilesExist(testCase, files) - tester.checkJustRequiredFilesExist(testCase, files) - -} - -func (tester *fileWriterTester) test() { - for i, tc := range tester.testCases { - cleanupWriterTest(tester.t) - tester.testCase(tc, i) - } -} - -func (tester *fileWriterTester) performWrite(fileWriter io.Writer, count int) { - for i := 0; i < count; i++ { - _, err := fileWriter.Write(bytesFileTest) - - if err != nil { - 
tester.t.Error(err) - return - } - } -} - -func (tester *fileWriterTester) checkRequiredFilesExist(testCase *fileWriterTestCase, files []string) { - var found bool - for _, expected := range testCase.resFiles { - found = false - exAbs, err := filepath.Abs(expected) - if err != nil { - tester.t.Errorf("filepath.Abs failed for %s", expected) - continue - } - - for _, f := range files { - if af, e := filepath.Abs(f); e == nil { - tester.t.Log(af) - if exAbs == af { - found = true - break - } - } else { - tester.t.Errorf("filepath.Abs failed for %s", f) - } - } - - if !found { - tester.t.Errorf("expected file: %s doesn't exist. Got %v\n", exAbs, files) - } - } -} - -func (tester *fileWriterTester) checkJustRequiredFilesExist(testCase *fileWriterTestCase, files []string) { - for _, f := range files { - found := false - for _, expected := range testCase.resFiles { - - exAbs, err := filepath.Abs(expected) - if err != nil { - tester.t.Errorf("filepath.Abs failed for %s", expected) - } else { - if exAbs == f { - found = true - break - } - } - } - - if !found { - tester.t.Errorf("unexpected file: %v", f) - } - } -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/writers_formattedwriter.go b/Godeps/_workspace/src/github.com/cihub/seelog/writers_formattedwriter.go deleted file mode 100644 index bf44a410..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/writers_formattedwriter.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. 
Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "errors" - "fmt" - "io" -) - -type formattedWriter struct { - writer io.Writer - formatter *formatter -} - -func NewFormattedWriter(writer io.Writer, formatter *formatter) (*formattedWriter, error) { - if formatter == nil { - return nil, errors.New("formatter can not be nil") - } - - return &formattedWriter{writer, formatter}, nil -} - -func (formattedWriter *formattedWriter) Write(message string, level LogLevel, context LogContextInterface) error { - str := formattedWriter.formatter.Format(message, level, context) - _, err := formattedWriter.writer.Write([]byte(str)) - return err -} - -func (formattedWriter *formattedWriter) String() string { - return fmt.Sprintf("writer: %s, format: %s", formattedWriter.writer, formattedWriter.formatter) -} - -func (formattedWriter *formattedWriter) Writer() io.Writer { - return formattedWriter.writer -} - -func (formattedWriter *formattedWriter) Format() *formatter { - return formattedWriter.formatter -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/writers_formattedwriter_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/writers_formattedwriter_test.go deleted file mode 100644 index 351ac4ef..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/writers_formattedwriter_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "testing" -) - -func TestformattedWriter(t *testing.T) { - formatStr := "%Level %LEVEL %Msg" - message := "message" - var logLevel = LogLevel(TraceLvl) - - bytesVerifier, err := newBytesVerifier(t) - if err != nil { - t.Error(err) - return - } - - formatter, err := NewFormatter(formatStr) - if err != nil { - t.Error(err) - return - } - - writer, err := NewFormattedWriter(bytesVerifier, formatter) - if err != nil { - t.Error(err) - return - } - - context, err := currentContext(nil) - if err != nil { - t.Error(err) - return - } - - logMessage := formatter.Format(message, logLevel, context) - - bytesVerifier.ExpectBytes([]byte(logMessage)) - writer.Write(message, logLevel, context) - bytesVerifier.MustNotExpect() -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/writers_rollingfilewriter.go b/Godeps/_workspace/src/github.com/cihub/seelog/writers_rollingfilewriter.go deleted file mode 100644 index 2422a67c..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/writers_rollingfilewriter.go +++ /dev/null @@ -1,625 +0,0 @@ -// Copyright (c) 2013 - Cloud Instruments Co., Ltd. -// -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "time" -) - -// Common constants -const ( - rollingLogHistoryDelimiter = "." -) - -// Types of the rolling writer: roll by date, by time, etc. -type rollingType uint8 - -const ( - rollingTypeSize = iota - rollingTypeTime -) - -// Types of the rolled file naming mode: prefix, postfix, etc. 
-type rollingNameMode uint8 - -const ( - rollingNameModePostfix = iota - rollingNameModePrefix -) - -var rollingNameModesStringRepresentation = map[rollingNameMode]string{ - rollingNameModePostfix: "postfix", - rollingNameModePrefix: "prefix", -} - -func rollingNameModeFromString(rollingNameStr string) (rollingNameMode, bool) { - for tp, tpStr := range rollingNameModesStringRepresentation { - if tpStr == rollingNameStr { - return tp, true - } - } - - return 0, false -} - -type rollingIntervalType uint8 - -const ( - rollingIntervalAny = iota - rollingIntervalDaily -) - -var rollingInvervalTypesStringRepresentation = map[rollingIntervalType]string{ - rollingIntervalDaily: "daily", -} - -func rollingIntervalTypeFromString(rollingTypeStr string) (rollingIntervalType, bool) { - for tp, tpStr := range rollingInvervalTypesStringRepresentation { - if tpStr == rollingTypeStr { - return tp, true - } - } - - return 0, false -} - -var rollingTypesStringRepresentation = map[rollingType]string{ - rollingTypeSize: "size", - rollingTypeTime: "date", -} - -func rollingTypeFromString(rollingTypeStr string) (rollingType, bool) { - for tp, tpStr := range rollingTypesStringRepresentation { - if tpStr == rollingTypeStr { - return tp, true - } - } - - return 0, false -} - -// Old logs archivation type. 
-type rollingArchiveType uint8 - -const ( - rollingArchiveNone = iota - rollingArchiveZip -) - -var rollingArchiveTypesStringRepresentation = map[rollingArchiveType]string{ - rollingArchiveNone: "none", - rollingArchiveZip: "zip", -} - -func rollingArchiveTypeFromString(rollingArchiveTypeStr string) (rollingArchiveType, bool) { - for tp, tpStr := range rollingArchiveTypesStringRepresentation { - if tpStr == rollingArchiveTypeStr { - return tp, true - } - } - - return 0, false -} - -// Default names for different archivation types -var rollingArchiveTypesDefaultNames = map[rollingArchiveType]string{ - rollingArchiveZip: "log.zip", -} - -// rollerVirtual is an interface that represents all virtual funcs that are -// called in different rolling writer subtypes. -type rollerVirtual interface { - needsToRoll() (bool, error) // Returns true if needs to switch to another file. - isFileRollNameValid(rname string) bool // Returns true if logger roll file name (postfix/prefix/etc.) is ok. - sortFileRollNamesAsc(fs []string) ([]string, error) // Sorts logger roll file names in ascending order of their creation by logger. - - // Creates a new froll history file using the contents of current file and special filename of the latest roll (prefix/ postfix). - // If lastRollName is empty (""), then it means that there is no latest roll (current is the first one) - getNewHistoryRollFileName(lastRollName string) string - getCurrentModifiedFileName(originalFileName string, first bool) (string, error) // Returns filename modified according to specific logger rules -} - -// rollingFileWriter writes received messages to a file, until time interval passes -// or file exceeds a specified limit. After that the current log file is renamed -// and writer starts to log into a new file. You can set a limit for such renamed -// files count, if you want, and then the rolling writer would delete older ones when -// the files count exceed the specified limit. 
-type rollingFileWriter struct { - fileName string // current file name. May differ from original in date rolling loggers - originalFileName string // original one - currentDirPath string - currentFile *os.File - currentFileSize int64 - rollingType rollingType // Rolling mode (Files roll by size/date/...) - archiveType rollingArchiveType - archivePath string - maxRolls int - nameMode rollingNameMode - self rollerVirtual // Used for virtual calls -} - -func newRollingFileWriter(fpath string, rtype rollingType, atype rollingArchiveType, apath string, maxr int, namemode rollingNameMode) (*rollingFileWriter, error) { - rw := new(rollingFileWriter) - rw.currentDirPath, rw.fileName = filepath.Split(fpath) - if len(rw.currentDirPath) == 0 { - rw.currentDirPath = "." - } - rw.originalFileName = rw.fileName - - rw.rollingType = rtype - rw.archiveType = atype - rw.archivePath = apath - rw.nameMode = namemode - rw.maxRolls = maxr - return rw, nil -} - -func (rw *rollingFileWriter) hasRollName(file string) bool { - switch rw.nameMode { - case rollingNameModePostfix: - rname := rw.originalFileName + rollingLogHistoryDelimiter - return strings.HasPrefix(file, rname) - case rollingNameModePrefix: - rname := rollingLogHistoryDelimiter + rw.originalFileName - return strings.HasSuffix(file, rname) - } - return false -} - -func (rw *rollingFileWriter) createFullFileName(originalName, rollname string) string { - switch rw.nameMode { - case rollingNameModePostfix: - return originalName + rollingLogHistoryDelimiter + rollname - case rollingNameModePrefix: - return rollname + rollingLogHistoryDelimiter + originalName - } - return "" -} - -func (rw *rollingFileWriter) getSortedLogHistory() ([]string, error) { - files, err := getDirFilePaths(rw.currentDirPath, nil, true) - if err != nil { - return nil, err - } - var validRollNames []string - for _, file := range files { - if file != rw.fileName && rw.hasRollName(file) { - rname := rw.getFileRollName(file) - if 
rw.self.isFileRollNameValid(rname) { - validRollNames = append(validRollNames, rname) - } - } - } - sortedTails, err := rw.self.sortFileRollNamesAsc(validRollNames) - if err != nil { - return nil, err - } - validSortedFiles := make([]string, len(sortedTails)) - for i, v := range sortedTails { - validSortedFiles[i] = rw.createFullFileName(rw.originalFileName, v) - } - return validSortedFiles, nil -} - -func (rw *rollingFileWriter) createFileAndFolderIfNeeded(first bool) error { - var err error - - if len(rw.currentDirPath) != 0 { - err = os.MkdirAll(rw.currentDirPath, defaultDirectoryPermissions) - - if err != nil { - return err - } - } - - rw.fileName, err = rw.self.getCurrentModifiedFileName(rw.originalFileName, first) - if err != nil { - return err - } - filePath := filepath.Join(rw.currentDirPath, rw.fileName) - - // If exists - stat, err := os.Lstat(filePath) - if err == nil { - rw.currentFile, err = os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND, defaultFilePermissions) - - stat, err = os.Lstat(filePath) - if err != nil { - return err - } - - rw.currentFileSize = stat.Size() - } else { - rw.currentFile, err = os.Create(filePath) - rw.currentFileSize = 0 - } - if err != nil { - return err - } - - return nil -} - -func (rw *rollingFileWriter) deleteOldRolls(history []string) error { - if rw.maxRolls <= 0 { - return nil - } - - rollsToDelete := len(history) - rw.maxRolls - if rollsToDelete <= 0 { - return nil - } - - switch rw.archiveType { - case rollingArchiveZip: - var files map[string][]byte - - // If archive exists - _, err := os.Lstat(rw.archivePath) - if nil == err { - // Extract files and content from it - files, err = unzip(rw.archivePath) - if err != nil { - return err - } - - // Remove the original file - err = tryRemoveFile(rw.archivePath) - if err != nil { - return err - } - } else { - files = make(map[string][]byte) - } - - // Add files to the existing files map, filled above - for i := 0; i < rollsToDelete; i++ { - rollPath := 
filepath.Join(rw.currentDirPath, history[i]) - bts, err := ioutil.ReadFile(rollPath) - if err != nil { - return err - } - - files[rollPath] = bts - } - - // Put the final file set to zip file. - if err = createZip(rw.archivePath, files); err != nil { - return err - } - } - var err error - // In all cases (archive files or not) the files should be deleted. - for i := 0; i < rollsToDelete; i++ { - // Try best to delete files without breaking the loop. - if err = tryRemoveFile(filepath.Join(rw.currentDirPath, history[i])); err != nil { - reportInternalError(err) - } - } - - return nil -} - -func (rw *rollingFileWriter) getFileRollName(fileName string) string { - switch rw.nameMode { - case rollingNameModePostfix: - return fileName[len(rw.originalFileName+rollingLogHistoryDelimiter):] - case rollingNameModePrefix: - return fileName[:len(fileName)-len(rw.originalFileName+rollingLogHistoryDelimiter)] - } - return "" -} - -func (rw *rollingFileWriter) Write(bytes []byte) (n int, err error) { - if rw.currentFile == nil { - err := rw.createFileAndFolderIfNeeded(true) - if err != nil { - return 0, err - } - } - // needs to roll if: - // * file roller max file size exceeded OR - // * time roller interval passed - nr, err := rw.self.needsToRoll() - if err != nil { - return 0, err - } - if nr { - // First, close current file. - err = rw.currentFile.Close() - if err != nil { - return 0, err - } - // Current history of all previous log files. - // For file roller it may be like this: - // * ... - // * file.log.4 - // * file.log.5 - // * file.log.6 - // - // For date roller it may look like this: - // * ... - // * file.log.11.Aug.13 - // * file.log.15.Aug.13 - // * file.log.16.Aug.13 - // Sorted log history does NOT include current file. - history, err := rw.getSortedLogHistory() - if err != nil { - return 0, err - } - // Renames current file to create a new roll history entry - // For file roller it may be like this: - // * ... 
- // * file.log.4 - // * file.log.5 - // * file.log.6 - // n file.log.7 <---- RENAMED (from file.log) - // Time rollers that doesn't modify file names (e.g. 'date' roller) skip this logic. - var newHistoryName string - var newRollMarkerName string - if len(history) > 0 { - // Create new rname name using last history file name - newRollMarkerName = rw.self.getNewHistoryRollFileName(rw.getFileRollName(history[len(history)-1])) - } else { - // Create first rname name - newRollMarkerName = rw.self.getNewHistoryRollFileName("") - } - if len(newRollMarkerName) != 0 { - newHistoryName = rw.createFullFileName(rw.fileName, newRollMarkerName) - } else { - newHistoryName = rw.fileName - } - if newHistoryName != rw.fileName { - err = os.Rename(filepath.Join(rw.currentDirPath, rw.fileName), filepath.Join(rw.currentDirPath, newHistoryName)) - if err != nil { - return 0, err - } - } - // Finally, add the newly added history file to the history archive - // and, if after that the archive exceeds the allowed max limit, older rolls - // must the removed/archived. 
- history = append(history, newHistoryName) - if len(history) > rw.maxRolls { - err = rw.deleteOldRolls(history) - if err != nil { - return 0, err - } - } - - err = rw.createFileAndFolderIfNeeded(false) - if err != nil { - return 0, err - } - } - - rw.currentFileSize += int64(len(bytes)) - return rw.currentFile.Write(bytes) -} - -func (rw *rollingFileWriter) Close() error { - if rw.currentFile != nil { - e := rw.currentFile.Close() - if e != nil { - return e - } - rw.currentFile = nil - } - return nil -} - -// ============================================================================================= -// Different types of rolling writers -// ============================================================================================= - -// -------------------------------------------------- -// Rolling writer by SIZE -// -------------------------------------------------- - -// rollingFileWriterSize performs roll when file exceeds a specified limit. -type rollingFileWriterSize struct { - *rollingFileWriter - maxFileSize int64 -} - -func NewRollingFileWriterSize(fpath string, atype rollingArchiveType, apath string, maxSize int64, maxRolls int, namemode rollingNameMode) (*rollingFileWriterSize, error) { - rw, err := newRollingFileWriter(fpath, rollingTypeSize, atype, apath, maxRolls, namemode) - if err != nil { - return nil, err - } - rws := &rollingFileWriterSize{rw, maxSize} - rws.self = rws - return rws, nil -} - -func (rws *rollingFileWriterSize) needsToRoll() (bool, error) { - return rws.currentFileSize >= rws.maxFileSize, nil -} - -func (rws *rollingFileWriterSize) isFileRollNameValid(rname string) bool { - if len(rname) == 0 { - return false - } - _, err := strconv.Atoi(rname) - return err == nil -} - -type rollSizeFileTailsSlice []string - -func (p rollSizeFileTailsSlice) Len() int { return len(p) } -func (p rollSizeFileTailsSlice) Less(i, j int) bool { - v1, _ := strconv.Atoi(p[i]) - v2, _ := strconv.Atoi(p[j]) - return v1 < v2 -} -func (p 
rollSizeFileTailsSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (rws *rollingFileWriterSize) sortFileRollNamesAsc(fs []string) ([]string, error) { - ss := rollSizeFileTailsSlice(fs) - sort.Sort(ss) - return ss, nil -} - -func (rws *rollingFileWriterSize) getNewHistoryRollFileName(lastRollName string) string { - v := 0 - if len(lastRollName) != 0 { - v, _ = strconv.Atoi(lastRollName) - } - return fmt.Sprintf("%d", v+1) -} - -func (rws *rollingFileWriterSize) getCurrentModifiedFileName(originalFileName string, first bool) (string, error) { - return originalFileName, nil -} - -func (rws *rollingFileWriterSize) String() string { - return fmt.Sprintf("Rolling file writer (By SIZE): filename: %s, archive: %s, archivefile: %s, maxFileSize: %v, maxRolls: %v", - rws.fileName, - rollingArchiveTypesStringRepresentation[rws.archiveType], - rws.archivePath, - rws.maxFileSize, - rws.maxRolls) -} - -// -------------------------------------------------- -// Rolling writer by TIME -// -------------------------------------------------- - -// rollingFileWriterTime performs roll when a specified time interval has passed. 
-type rollingFileWriterTime struct { - *rollingFileWriter - timePattern string - interval rollingIntervalType - currentTimeFileName string -} - -func NewRollingFileWriterTime(fpath string, atype rollingArchiveType, apath string, maxr int, - timePattern string, interval rollingIntervalType, namemode rollingNameMode) (*rollingFileWriterTime, error) { - - rw, err := newRollingFileWriter(fpath, rollingTypeTime, atype, apath, maxr, namemode) - if err != nil { - return nil, err - } - rws := &rollingFileWriterTime{rw, timePattern, interval, ""} - rws.self = rws - return rws, nil -} - -func (rwt *rollingFileWriterTime) needsToRoll() (bool, error) { - switch rwt.nameMode { - case rollingNameModePostfix: - if rwt.originalFileName+rollingLogHistoryDelimiter+time.Now().Format(rwt.timePattern) == rwt.fileName { - return false, nil - } - case rollingNameModePrefix: - if time.Now().Format(rwt.timePattern)+rollingLogHistoryDelimiter+rwt.originalFileName == rwt.fileName { - return false, nil - } - } - if rwt.interval == rollingIntervalAny { - return true, nil - } - - tprev, err := time.ParseInLocation(rwt.timePattern, rwt.getFileRollName(rwt.fileName), time.Local) - if err != nil { - return false, err - } - - diff := time.Now().Sub(tprev) - switch rwt.interval { - case rollingIntervalDaily: - return diff >= 24*time.Hour, nil - } - return false, fmt.Errorf("unknown interval type: %d", rwt.interval) -} - -func (rwt *rollingFileWriterTime) isFileRollNameValid(rname string) bool { - if len(rname) == 0 { - return false - } - _, err := time.ParseInLocation(rwt.timePattern, rname, time.Local) - return err == nil -} - -type rollTimeFileTailsSlice struct { - data []string - pattern string -} - -func (p rollTimeFileTailsSlice) Len() int { return len(p.data) } - -func (p rollTimeFileTailsSlice) Less(i, j int) bool { - t1, _ := time.ParseInLocation(p.pattern, p.data[i], time.Local) - t2, _ := time.ParseInLocation(p.pattern, p.data[j], time.Local) - return t1.Before(t2) -} - -func (p 
rollTimeFileTailsSlice) Swap(i, j int) { p.data[i], p.data[j] = p.data[j], p.data[i] } - -func (rwt *rollingFileWriterTime) sortFileRollNamesAsc(fs []string) ([]string, error) { - ss := rollTimeFileTailsSlice{data: fs, pattern: rwt.timePattern} - sort.Sort(ss) - return ss.data, nil -} - -func (rwt *rollingFileWriterTime) getNewHistoryRollFileName(lastRollName string) string { - return "" -} - -func (rwt *rollingFileWriterTime) getCurrentModifiedFileName(originalFileName string, first bool) (string, error) { - if first { - history, err := rwt.getSortedLogHistory() - if err != nil { - return "", err - } - if len(history) > 0 { - return history[len(history)-1], nil - } - } - - switch rwt.nameMode { - case rollingNameModePostfix: - return originalFileName + rollingLogHistoryDelimiter + time.Now().Format(rwt.timePattern), nil - case rollingNameModePrefix: - return time.Now().Format(rwt.timePattern) + rollingLogHistoryDelimiter + originalFileName, nil - } - return "", fmt.Errorf("Unknown rolling writer mode. Either postfix or prefix must be used") -} - -func (rwt *rollingFileWriterTime) String() string { - return fmt.Sprintf("Rolling file writer (By TIME): filename: %s, archive: %s, archivefile: %s, maxInterval: %v, pattern: %s, maxRolls: %v", - rwt.fileName, - rollingArchiveTypesStringRepresentation[rwt.archiveType], - rwt.archivePath, - rwt.interval, - rwt.timePattern, - rwt.maxRolls) -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/writers_rollingfilewriter_test.go b/Godeps/_workspace/src/github.com/cihub/seelog/writers_rollingfilewriter_test.go deleted file mode 100644 index 9ca91ae0..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/writers_rollingfilewriter_test.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. 
Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package seelog - -import ( - "fmt" - "io" - "testing" -) - -// fileWriterTestCase is declared in writers_filewriter_test.go - -func createRollingSizeFileWriterTestCase( - files []string, - fileName string, - fileSize int64, - maxRolls int, - writeCount int, - resFiles []string, - nameMode rollingNameMode) *fileWriterTestCase { - - return &fileWriterTestCase{files, fileName, rollingTypeSize, fileSize, maxRolls, "", writeCount, resFiles, nameMode} -} - -func createRollingDatefileWriterTestCase( - files []string, - fileName string, - datePattern string, - writeCount int, - resFiles []string, - nameMode rollingNameMode) *fileWriterTestCase { - - return &fileWriterTestCase{files, fileName, rollingTypeTime, 0, 0, datePattern, writeCount, resFiles, nameMode} -} - -func TestRollingFileWriter(t *testing.T) { - t.Logf("Starting rolling file writer tests") - NewFileWriterTester(rollingfileWriterTests, rollingFileWriterGetter, t).test() -} - -//=============================================================== - -func rollingFileWriterGetter(testCase *fileWriterTestCase) (io.WriteCloser, error) { - if testCase.rollingType == rollingTypeSize { - return NewRollingFileWriterSize(testCase.fileName, rollingArchiveNone, "", testCase.fileSize, testCase.maxRolls, testCase.nameMode) - } else if testCase.rollingType == rollingTypeTime { - return NewRollingFileWriterTime(testCase.fileName, rollingArchiveNone, "", -1, testCase.datePattern, rollingIntervalDaily, testCase.nameMode) - } - - return nil, fmt.Errorf("incorrect rollingType") -} - -//=============================================================== -var rollingfileWriterTests = []*fileWriterTestCase{ - createRollingSizeFileWriterTestCase([]string{}, "log.testlog", 10, 10, 1, []string{"log.testlog"}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{}, "log.testlog", 10, 10, 2, []string{"log.testlog", "log.testlog.1"}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{"1.log.testlog"}, 
"log.testlog", 10, 10, 2, []string{"log.testlog", "1.log.testlog", "2.log.testlog"}, rollingNameModePrefix), - createRollingSizeFileWriterTestCase([]string{"log.testlog.1"}, "log.testlog", 10, 1, 2, []string{"log.testlog", "log.testlog.2"}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{}, "log.testlog", 10, 1, 2, []string{"log.testlog", "log.testlog.1"}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{"log.testlog.9"}, "log.testlog", 10, 1, 2, []string{"log.testlog", "log.testlog.10"}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{"log.testlog.a", "log.testlog.1b"}, "log.testlog", 10, 1, 2, []string{"log.testlog", "log.testlog.1", "log.testlog.a", "log.testlog.1b"}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{}, `dir/log.testlog`, 10, 10, 1, []string{`dir/log.testlog`}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{}, `dir/log.testlog`, 10, 10, 2, []string{`dir/log.testlog`, `dir/1.log.testlog`}, rollingNameModePrefix), - createRollingSizeFileWriterTestCase([]string{`dir/dir/log.testlog.1`}, `dir/dir/log.testlog`, 10, 10, 2, []string{`dir/dir/log.testlog`, `dir/dir/log.testlog.1`, `dir/dir/log.testlog.2`}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{`dir/dir/dir/log.testlog.1`}, `dir/dir/dir/log.testlog`, 10, 1, 2, []string{`dir/dir/dir/log.testlog`, `dir/dir/dir/log.testlog.2`}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{}, `./log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.1`}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{`././././log.testlog.9`}, `log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.10`}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{"dir/dir/log.testlog.a", "dir/dir/log.testlog.1b"}, "dir/dir/log.testlog", 10, 1, 2, []string{"dir/dir/log.testlog", "dir/dir/log.testlog.1", 
"dir/dir/log.testlog.a", "dir/dir/log.testlog.1b"}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{}, `././dir/log.testlog`, 10, 10, 1, []string{`dir/log.testlog`}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{}, `././dir/log.testlog`, 10, 10, 2, []string{`dir/log.testlog`, `dir/log.testlog.1`}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{`././dir/dir/log.testlog.1`}, `dir/dir/log.testlog`, 10, 10, 2, []string{`dir/dir/log.testlog`, `dir/dir/log.testlog.1`, `dir/dir/log.testlog.2`}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{`././dir/dir/dir/log.testlog.1`}, `dir/dir/dir/log.testlog`, 10, 1, 2, []string{`dir/dir/dir/log.testlog`, `dir/dir/dir/log.testlog.2`}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{}, `././log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.1`}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{`././././log.testlog.9`}, `log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.10`}, rollingNameModePostfix), - createRollingSizeFileWriterTestCase([]string{"././dir/dir/log.testlog.a", "././dir/dir/log.testlog.1b"}, "dir/dir/log.testlog", 10, 1, 2, []string{"dir/dir/log.testlog", "dir/dir/log.testlog.1", "dir/dir/log.testlog.a", "dir/dir/log.testlog.1b"}, rollingNameModePostfix), - // ==================== -} diff --git a/Godeps/_workspace/src/github.com/cihub/seelog/writers_smtpwriter.go b/Godeps/_workspace/src/github.com/cihub/seelog/writers_smtpwriter.go deleted file mode 100644 index 31b79438..00000000 --- a/Godeps/_workspace/src/github.com/cihub/seelog/writers_smtpwriter.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright (c) 2012 - Cloud Instruments Co., Ltd. -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. 
Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package seelog - -import ( - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io/ioutil" - "net/smtp" - "path/filepath" - "strings" -) - -const ( - // Default subject phrase for sending emails. - DefaultSubjectPhrase = "Diagnostic message from server: " - - // Message subject pattern composed according to RFC 5321. - rfc5321SubjectPattern = "From: %s <%s>\nSubject: %s\n\n" -) - -// smtpWriter is used to send emails via given SMTP-server. -type smtpWriter struct { - auth smtp.Auth - hostName string - hostPort string - hostNameWithPort string - senderAddress string - senderName string - recipientAddresses []string - caCertDirPaths []string - mailHeaders []string - subject string -} - -// NewSMTPWriter returns a new SMTP-writer. 
-func NewSMTPWriter(sa, sn string, ras []string, hn, hp, un, pwd string, cacdps []string, subj string, headers []string) *smtpWriter { - return &smtpWriter{ - auth: smtp.PlainAuth("", un, pwd, hn), - hostName: hn, - hostPort: hp, - hostNameWithPort: fmt.Sprintf("%s:%s", hn, hp), - senderAddress: sa, - senderName: sn, - recipientAddresses: ras, - caCertDirPaths: cacdps, - subject: subj, - mailHeaders: headers, - } -} - -func prepareMessage(senderAddr, senderName, subject string, body []byte, headers []string) []byte { - headerLines := fmt.Sprintf(rfc5321SubjectPattern, senderName, senderAddr, subject) - // Build header lines if configured. - if headers != nil && len(headers) > 0 { - headerLines += strings.Join(headers, "\n") - headerLines += "\n" - } - return append([]byte(headerLines), body...) -} - -// getTLSConfig gets paths of PEM files with certificates, -// host server name and tries to create an appropriate TLS.Config. -func getTLSConfig(pemFileDirPaths []string, hostName string) (config *tls.Config, err error) { - if pemFileDirPaths == nil || len(pemFileDirPaths) == 0 { - err = errors.New("invalid PEM file paths") - return - } - pemEncodedContent := []byte{} - var ( - e error - bytes []byte - ) - // Create a file-filter-by-extension, set aside non-pem files. - pemFilePathFilter := func(fp string) bool { - if filepath.Ext(fp) == ".pem" { - return true - } - return false - } - for _, pemFileDirPath := range pemFileDirPaths { - pemFilePaths, err := getDirFilePaths(pemFileDirPath, pemFilePathFilter, false) - if err != nil { - return nil, err - } - - // Put together all the PEM files to decode them as a whole byte slice. - for _, pfp := range pemFilePaths { - if bytes, e = ioutil.ReadFile(pfp); e == nil { - pemEncodedContent = append(pemEncodedContent, bytes...) 
- } else { - return nil, fmt.Errorf("cannot read file: %s: %s", pfp, e.Error()) - } - } - } - config = &tls.Config{RootCAs: x509.NewCertPool(), ServerName: hostName} - isAppended := config.RootCAs.AppendCertsFromPEM(pemEncodedContent) - if !isAppended { - // Extract this into a separate error. - err = errors.New("invalid PEM content") - return - } - return -} - -// SendMail accepts TLS configuration, connects to the server at addr, -// switches to TLS if possible, authenticates with mechanism a if possible, -// and then sends an email from address from, to addresses to, with message msg. -func sendMailWithTLSConfig(config *tls.Config, addr string, a smtp.Auth, from string, to []string, msg []byte) error { - c, err := smtp.Dial(addr) - if err != nil { - return err - } - // Check if the server supports STARTTLS extension. - if ok, _ := c.Extension("STARTTLS"); ok { - if err = c.StartTLS(config); err != nil { - return err - } - } - // Check if the server supports AUTH extension and use given smtp.Auth. - if a != nil { - if isSupported, _ := c.Extension("AUTH"); isSupported { - if err = c.Auth(a); err != nil { - return err - } - } - } - // Portion of code from the official smtp.SendMail function, - // see http://golang.org/src/pkg/net/smtp/smtp.go. - if err = c.Mail(from); err != nil { - return err - } - for _, addr := range to { - if err = c.Rcpt(addr); err != nil { - return err - } - } - w, err := c.Data() - if err != nil { - return err - } - _, err = w.Write(msg) - if err != nil { - return err - } - err = w.Close() - if err != nil { - return err - } - return c.Quit() -} - -// Write pushes a text message properly composed according to RFC 5321 -// to a post server, which sends it to the recipients. 
-func (smtpw *smtpWriter) Write(data []byte) (int, error) { - var err error - - if smtpw.caCertDirPaths == nil { - err = smtp.SendMail( - smtpw.hostNameWithPort, - smtpw.auth, - smtpw.senderAddress, - smtpw.recipientAddresses, - prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders), - ) - } else { - config, e := getTLSConfig(smtpw.caCertDirPaths, smtpw.hostName) - if e != nil { - return 0, e - } - err = sendMailWithTLSConfig( - config, - smtpw.hostNameWithPort, - smtpw.auth, - smtpw.senderAddress, - smtpw.recipientAddresses, - prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders), - ) - } - if err != nil { - return 0, err - } - return len(data), nil -} - -// Close closes down SMTP-connection. -func (smtpw *smtpWriter) Close() error { - // Do nothing as Write method opens and closes connection automatically. - return nil -} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore b/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore new file mode 100644 index 00000000..7adca943 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore @@ -0,0 +1,4 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE b/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. 
+ +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/Makefile b/Godeps/_workspace/src/github.com/go-ini/ini/Makefile new file mode 100644 index 00000000..ac034e52 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/Makefile @@ -0,0 +1,12 @@ +.PHONY: build test bench vet + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -race -test.bench=. 
-test.benchmem + +vet: + go vet diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/README.md b/Godeps/_workspace/src/github.com/go-ini/ini/README.md new file mode 100644 index 00000000..a87cca23 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/README.md @@ -0,0 +1,632 @@ +ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) +=== + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +[简体中文](README_ZH.md) + +## Feature + +- Load multiple data sources(`[]byte` or file) with overwrites. +- Read with recursion values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. +- Keep sections and keys in order as you parse and save. + +## Installation + +To use a tagged revision: + + go get gopkg.in/ini.v1 + +To use with latest changes: + + go get github.com/go-ini/ini + +Please add `-u` flag to update in the future. + +### Testing + +If you want to test on your machine, please apply `-t` flag: + + go get -t gopkg.in/ini.v1 + +Please add `-u` flag to update in the future. + +## Getting Started + +### Loading from data sources + +A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error. + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +Or start with an empty object: + +```go +cfg := ini.Empty() +``` + +When you cannot decide how many data sources to load at the beginning, you still able to **Append()** them later. 
+ +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +If you have a list of files with possibilities that some of them may not available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning error. + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +The cool thing is, whenever the file is available to load while you're calling `Reload` method, it will be counted as usual. + +### Working with sections + +To get a section, you would need to: + +```go +section, err := cfg.GetSection("section name") +``` + +For a shortcut for default section, just give an empty string as name: + +```go +section, err := cfg.GetSection("") +``` + +When you're pretty sure the section exists, following code could make your life easier: + +```go +section := cfg.Section("") +``` + +What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. 
+ +To create a new section: + +```go +err := cfg.NewSection("new section") +``` + +To get a list of sections or section names: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### Working with keys + +To get a key under a section: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +Same rule applies to key operations: + +```go +key := cfg.Section("").Key("key name") +``` + +To check if a key exists: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +To create a new key: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +To get a list of keys or key names: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +To get a clone hash of keys and corresponding values: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### Working with values + +To get a string value: + +```go +val := cfg.Section("").Key("key name").String() +``` + +To validate key value on the fly: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +To check if raw value exists: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +To get value with types: + +```go +// For boolean values: +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = 
cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. + +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. 
+ +#### Helper methods of working with values + +To get value with given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. + +To validate value in a given range: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### Auto-split values into a slice + +To use zero value of type for invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +To exclude invalid values out of result slice: + +```go +// 
Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +Or to return nothing but error when have invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### Save your configuration + +Finally, it's time to save your configuration to somewhere. + +A typical way to save configuration is writing it to a file: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +Another way to save is writing to a `io.Writer` interface: + +```go +// ... +cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +## Advanced Usage + +### Recursive Values + +For all value of keys, there is a special syntax `%()s`, where `` is the key name in same section or default section, and `%()s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions. 
+ +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### Parent-child Sections + +You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section. + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +### Auto-increment Key Names + +If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### Map To Struct + +Want more objective way to play with INI? Cool. + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // Things can be simpler. + err = ini.MapTo(p, "path/to/ini") + // ... + + // Just map a section? Fine. + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... 
+} +``` + +Can I have default value for field? Absolutely. + +Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +It's really cool, but what's the point if you can't give me my file back from struct? + +### Reflect From Struct + +Why not? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +So, what do I get? + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### Name Mapper + +To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. + +There are 2 built-in name mappers: + +- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. +- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. + +To use them: + +```go +type Info struct { + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. 
+ +#### Other Notes On Map/Reflect + +Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome. + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## Getting Help + +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- [File An Issue](https://github.com/go-ini/ini/issues/new) + +## FAQs + +### What does `BlockMode` field do? + +By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster. + +### Why another INI library? + +Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. + +To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. 
diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md b/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md new file mode 100644 index 00000000..75c10051 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md @@ -0,0 +1,619 @@ +本包提供了 Go 语言中读写 INI 文件的功能。 + +## 功能特性 + +- 支持覆盖加载多个数据源(`[]byte` 或文件) +- 支持递归读取键值 +- 支持读取父子分区 +- 支持读取自增键名 +- 支持读取多行的键值 +- 支持大量辅助方法 +- 支持在读取时直接转换为 Go 语言类型 +- 支持读取和 **写入** 分区和键的注释 +- 轻松操作分区、键值和注释 +- 在保存文件时分区和键值会保持原有的顺序 + +## 下载安装 + +使用一个特定版本: + + go get gopkg.in/ini.v1 + +使用最新版: + + go get github.com/go-ini/ini + +如需更新请添加 `-u` 选项。 + +### 测试安装 + +如果您想要在自己的机器上运行测试,请使用 `-t` 标记: + + go get -t gopkg.in/ini.v1 + +如需更新请添加 `-u` 选项。 + +## 开始使用 + +### 从数据源加载 + +一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +或者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误): + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。 + +### 操作分区(Section) + +获取指定分区: + +```go +section, err := cfg.GetSection("section name") +``` + +如果您想要获取默认分区,则可以用空字符串代替分区名: + +```go +section, err := cfg.GetSection("") +``` + +当您非常确定某个分区是存在的,可以使用以下简便方法: + +```go +section := cfg.Section("") +``` + +如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 + +创建一个分区: + +```go +err := cfg.NewSection("new section") +``` + +获取所有分区对象或名称: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### 操作键(Key) + +获取某个分区下的键: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +和分区一样,您也可以直接获取键而忽略错误处理: + +```go +key := cfg.Section("").Key("key name") +``` + +判断某个键是否存在: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +创建一个新的键: + +```go 
+err := cfg.Section("").NewKey("name", "value") +``` + +获取分区下的所有键或键名: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获取分区下的所有键值对的克隆: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### 操作键值(Value) + +获取一个类型为字符串(string)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获取值的同时通过自定义函数进行处理验证: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +判断某个原值是否存在: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +获取其它类型的值: + +```go +// 布尔值的规则: +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, +// 当键不存在或者转换失败时,则会直接返回该默认值。 +// 但是,MustString 方法必须传递一个默认值。 + +v = cfg.Seciont("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = 
cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +如果我的值有好多行怎么办? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +嗯哼?小 case! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直是小菜一碟! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +需要注意的是,值两侧的单引号会被自动剔除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就是全部了?哈哈,当然不是。 + +#### 操作键值的辅助方法 + +获取键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 + +验证获取的值是否在指定范围内: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = 
cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### 自动分割键值到切片(slice) + +当存在无效输入时,使用零值代替: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +从结果切片中剔除无效输入: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +当存在无效输入时,直接返回错误: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### 保存配置 + +终于到了这个时刻,是时候保存一下配置了。 + +比较原始的做法是输出配置到某个文件: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: + +```go +// ... 
+cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +### 高级用法 + +#### 递归读取键值 + +在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +#### 读取父子分区 + +您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### 读取自增键名 + +如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### 映射到结构 + +想要使用更加面向对象的方式玩转 INI 吗?好主意。 + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // 一切竟可以如此的简单。 + err = ini.MapTo(p, "path/to/ini") + // ... + + // 嗯哼?只需要映射一个分区吗? + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... 
+``` + +这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? + +### 从结构反射 + +可是,我有说不能吗? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +瞧瞧,奇迹发生了。 + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### 名称映射器(Name Mapper) + +为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 + +目前有 2 款内置的映射器: + +- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 +- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 + +使用方法: + +```go +type Info struct{ + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 + +#### 映射/反射的其它说明 + +任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! 
+ +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## 获取帮助 + +- [API 文档](https://gowalker.org/gopkg.in/ini.v1) +- [创建工单](https://github.com/go-ini/ini/issues/new) + +## 常见问题 + +### 字段 `BlockMode` 是什么? + +默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 + +### 为什么要写另一个 INI 解析库? + +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 + +为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/ini.go b/Godeps/_workspace/src/github.com/go-ini/ini/ini.go new file mode 100644 index 00000000..4ead9f00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/ini.go @@ -0,0 +1,462 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + // Name for default section. You can use this constant or the string literal. + // In most of cases, an empty string is all you need to access the section. 
+ DEFAULT_SECTION = "DEFAULT" + + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 99 + _VERSION = "1.10.1" +) + +// Version returns current package version literal. +func Version() string { + return _VERSION +} + +var ( + // Delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows + // at package init time. + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Indicate whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. + PrettyFormat = true +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is an interface that returns object which can be read and closed. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return &bytesReadCloser{bytes.NewReader(s.data)}, nil +} + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. 
+ lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + // Whether the parser should ignore nonexistent files or return error. + looseMode bool + + NameMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, looseMode bool) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + looseMode: looseMode, + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +func loadSources(looseMode bool, source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources, looseMode) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return loadSources(false, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return loadSources(true, source, others...) +} + +// Empty returns an empty file object. 
+func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. +func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("error creating new section: empty section name") + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if inSlice(name, f.sectionList) { + return f.sections[name], nil + } + + f.sectionList = append(f.sectionList, name) + f.sections[name] = newSection(f, name) + return f.sections[name], nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("section '%s' does not exist", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. +func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. 
+func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + delete(f.sections, name) + return + } + } +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.looseMode { + f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. 
+ buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } + if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { + return 0, err + } + } + + if i > 0 { + if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return 0, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + // Count and generate alignment length and buffer spaces + alignLength := 0 + if PrettyFormat { + for i := 0; i < len(sec.keyList); i++ { + if len(sec.keyList[i]) > alignLength { + alignLength = len(sec.keyList[i]) + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } + if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { + return 0, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncr: + kname = "-" + case strings.ContainsAny(kname, "\"=:"): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + if _, err = buf.WriteString(kname); err != nil { + return 0, err + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + val := key.value + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { + return 0, err + } + } + + // Put a line between sections + 
if _, err = buf.WriteString(LineBreak); err != nil { + return 0, err + } + } + + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" + defer os.Remove(tmpPath) + + fw, err := os.Create(tmpPath) + if err != nil { + return err + } + + if _, err = f.WriteToIndent(fw, indent); err != nil { + fw.Close() + return err + } + fw.Close() + + // Remove old file and rename the new one. + os.Remove(filename) + return os.Rename(tmpPath, filename) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/key.go b/Godeps/_workspace/src/github.com/go-ini/ini/key.go new file mode 100644 index 00000000..7cbccd38 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/key.go @@ -0,0 +1,616 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. 
+type Key struct { + s *Section + Comment string + name string + value string + isAutoIncr bool +} + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// String returns string representation of value. +func (k *Key) String() string { + val := k.value + if strings.Index(val, "%") == -1 { + return val + } + + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. 
+func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. 
+func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. 
+func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.getInts(delim, true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.getInt64s(delim, true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.getUints(delim, true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). 
+func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.getInts(delim, false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.getInt64s(delim, false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.getUints(delim, false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. 
+func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.getFloat64s(delim, false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.getInts(delim, false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.getInt64s(delim, false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.getUints(delim, false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.getUint64s(delim, false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.getTimesFormat(format, delim, false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// getFloat64s returns list of float64 divided by given delimiter. +func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) { + strs := k.Strings(delim) + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getInts returns list of int divided by given delimiter. 
+func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) { + strs := k.Strings(delim) + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getInt64s returns list of int64 divided by given delimiter. +func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) { + strs := k.Strings(delim) + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getUints returns list of uint divided by given delimiter. +func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) { + strs := k.Strings(delim) + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// getUint64s returns list of uint64 divided by given delimiter. +func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + strs := k.Strings(delim) + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter. 
+func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + strs := k.Strings(delim) + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/parser.go b/Godeps/_workspace/src/github.com/go-ini/ini/parser.go new file mode 100644 index 00000000..1c1bf91f --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/parser.go @@ -0,0 +1,312 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of BOM-UTF8 format. 
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + p.buf.Read(mask) + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. + var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, fmt.Errorf("key-value delimiter not found: %s", line) + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, fmt.Errorf("key-value delimiter not found: %s", line) + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := 
strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. +func hasSurroundedQuote(in string, quote byte) bool { + return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace. 
+ line = strings.TrimSpace(line) + + // Check continuation lines + if line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + section, _ := f.NewSection(DEFAULT_SECTION) + + var line []byte + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + closeIdx := bytes.IndexByte(line, ']') + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + section, err = f.NewSection(string(line[1:closeIdx])) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset aotu-counter and comments + p.comment.Reset() + p.count = 1 + continue + } + + kname, offset, err := readKeyName(line) + if err != nil { + return err + } + + // Auto increment. 
+ isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + key, err := section.NewKey(kname, "") + if err != nil { + return err + } + key.isAutoIncr = isAutoIncr + + value, err := p.readValue(line[offset:]) + if err != nil { + return err + } + key.SetValue(value) + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/section.go b/Godeps/_workspace/src/github.com/go-ini/ini/section.go new file mode 100644 index 00000000..ed8cbdb5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/section.go @@ -0,0 +1,177 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string +} + +func newSection(f *File, name string) *Section { + return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// NewKey creates a new key to given section. 
+func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + s.keys[name].value = val + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = &Key{s, "", name, val, false} + s.keysHash[name] = val + return s.keys[name], nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. 
+ key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/struct.go b/Godeps/_workspace/src/github.com/go-ini/ini/struct.go new file mode 100644 index 00000000..3fb92c39 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/struct.go @@ -0,0 +1,351 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. + TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to strcut. 
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return nil + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil || intVal == 0 { + return nil + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + if err == nil { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return nil + } + field.SetUint(uintVal) + + case reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return nil + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return nil + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + vals := key.Strings(delim) + numVals := len(vals) + if numVals == 0 { + return nil + } + + sliceOf := field.Type().Elem().Kind() + + var times []time.Time + if sliceOf == reflectTime { + times = key.Times(delim) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(times[i])) + default: + slice.Index(i).Set(reflect.ValueOf(vals[i])) + } + } + field.Set(slice) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) mapTo(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < 
typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// MapTo maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// reflectWithProperType does the opposite thing with setWithProperType. 
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float64, + reflectTime: + key.SetValue(fmt.Sprint(field)) + case reflect.Slice: + vals := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + isTime := fmt.Sprint(field.Type()) == "[]time.Time" + for i := 0; i < field.Len(); i++ { + if isTime { + buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339)) + } else { + buf.WriteString(fmt.Sprint(vals.Index(i))) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || + (tpField.Type.Kind() == reflect.Struct) { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + } + continue + } + + // Note: Same reason as secion. 
+ key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects secion from given struct. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot reflect from non-pointer struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. +func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore new file mode 100644 index 00000000..531fcc11 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore @@ -0,0 +1,4 @@ +jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml new file mode 100644 index 00000000..1f980775 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml @@ -0,0 +1,9 @@ +language: go + +sudo: false + +go: + - 1.4 + +install: go get -v -t ./... 
+script: make test diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 00000000..b03310a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile new file mode 100644 index 00000000..a828d284 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile @@ -0,0 +1,44 @@ + +CMD = jpgo + +help: + @echo "Please use \`make ' where is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ./... + +build: + rm -f $(CMD) + go build ./... + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: + go test -v ./... + +check: + go vet ./... + @echo "golint ./..." 
+ @lint=`golint ./...`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata + +bench: + go test -bench . -cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md new file mode 100644 index 00000000..187ef676 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md @@ -0,0 +1,7 @@ +# go-jmespath - A JMESPath implementation in Go + +[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) + + + +See http://jmespath.org for more info. diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 00000000..9cfa988b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,49 @@ +package jmespath + +import "strconv" + +// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. 
+func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 00000000..1cd2d239 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 
152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 00000000..9b7cd89b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+ return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: 
[]jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "amp", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + {types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", 
+ arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr 
*treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. 
+ args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := 
toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } 
+ if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. 
+ items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string. 
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 00000000..13c74604 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". +func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := 
node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) 
+ } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. + rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + 
} + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + } + return nil, 
errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) 
+ elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 00000000..817900c8 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. + currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. 
+func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. 
+func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. + t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' 
{ + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+ return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tPipe, "|" -> tOr. 
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. 
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 00000000..1240a175 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. 
+// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expresssion: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := 
p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, 
+ }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, 
right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions 
[]ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := 
p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) 
SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 00000000..dae79cbd --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 00000000..ddc1b7d7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. 
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. 
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? 
+ if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/Godeps/_workspace/src/github.com/opsgenie/opsgenie-go-sdk/logging/log.go b/Godeps/_workspace/src/github.com/opsgenie/opsgenie-go-sdk/logging/log.go deleted file mode 100644 index bb0ee9e3..00000000 --- a/Godeps/_workspace/src/github.com/opsgenie/opsgenie-go-sdk/logging/log.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2015 OpsGenie. All rights reserved. -Use of this source code is governed by a Apache Software -license that can be found in the LICENSE file. -*/ - -//Package logging provides log interface. -package logging - -import ( - "fmt" - - "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/cihub/seelog" -) - -// logger is the internal logger object. -var logger seelog.LoggerInterface - -func init() { - DisableLog() -} - -// DisableLog disables all library log output. -func DisableLog() { - logger = seelog.Disabled -} - -// UseLogger is a wrapper for Seelog's UseLogger function. 
It sets the newLogger as the current logger. -func UseLogger(newLogger seelog.LoggerInterface) { - logger = newLogger - seelog.UseLogger(logger) -} - -// Logger returns internal logger object to achieve logging. -func Logger() seelog.LoggerInterface { - return logger -} - -// ConfigureLogger configures the new logger according to the configuration and sets it as the current logger. -func ConfigureLogger(testConfig []byte) { - loggr, err := seelog.LoggerFromConfigAsBytes([]byte(testConfig)) - if err != nil { - fmt.Printf("error occured: %s\n", err.Error()) - } - UseLogger(loggr) -} - -// FlushLog is a wrapper for seelog's Flush function. It flushes all the messages in the logger. -func FlushLog() { - logger.Flush() -} diff --git a/notifier/aws-sns-notifier.go b/notifier/aws-sns-notifier.go index b9878ece..46e8e218 100644 --- a/notifier/aws-sns-notifier.go +++ b/notifier/aws-sns-notifier.go @@ -3,9 +3,9 @@ package notifier import ( "fmt" log "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/Sirupsen/logrus" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/sns" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session" + "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sns" ) type AwsSnsNotifier struct { diff --git a/notifier/email-notifier.go b/notifier/email-notifier.go index b113f7f5..7854d74c 100644 --- a/notifier/email-notifier.go +++ b/notifier/email-notifier.go @@ -10,6 +10,9 @@ import ( log "github.com/AcalephStorage/consul-alerts/Godeps/_workspace/src/github.com/Sirupsen/logrus" ) +var sendMail = smtp.SendMail + +// EmailNotifier sends email notifications type EmailNotifier struct { ClusterName string Template string @@ -90,8 +93,15 @@ func (emailNotifier *EmailNotifier) Notify(alerts 
Messages) bool { msg += body.String() addr := fmt.Sprintf("%s:%d", emailNotifier.Url, emailNotifier.Port) - auth := smtp.PlainAuth("", emailNotifier.Username, emailNotifier.Password, emailNotifier.Url) - if err := smtp.SendMail(addr, auth, emailNotifier.SenderEmail, emailNotifier.Receivers, []byte(msg)); err != nil { + + var auth smtp.Auth + if emailNotifier.Username == "" || emailNotifier.Password == "" { + auth = nil + } else { + auth = smtp.PlainAuth("", emailNotifier.Username, emailNotifier.Password, emailNotifier.Url) + } + + if err := sendMail(addr, auth, emailNotifier.SenderEmail, emailNotifier.Receivers, []byte(msg)); err != nil { log.Println("Unable to send notification:", err) return false } diff --git a/notifier/email-notifier_test.go b/notifier/email-notifier_test.go new file mode 100644 index 00000000..55a27e40 --- /dev/null +++ b/notifier/email-notifier_test.go @@ -0,0 +1,53 @@ +package notifier + +import ( + "net/smtp" + "testing" +) + +func TestNotify_AuthMustBeNilIfNoUsernameIsProvided(t *testing.T) { + auth := notifyAndReturnAuth(t, EmailNotifier{Password: "some password"}) + + if auth != nil { + t.Error("auth must be nil if username is nil") + } +} + +func TestNotify_AuthMustBeNilIfNoPasswordIsProvided(t *testing.T) { + auth := notifyAndReturnAuth(t, EmailNotifier{Username: "some username"}) + + if auth != nil { + t.Error("auth must be nil if password is nil") + } +} + +func TestNotify_AuthMustNotBeNilIfUsernameAndPasswordAreProvided(t *testing.T) { + auth := notifyAndReturnAuth(t, EmailNotifier{Username: "some username", Password: "some password"}) + + if auth == nil { + t.Error("auth must not be nil if both username and password are not nil") + } +} + +func notifyAndReturnAuth(t *testing.T, notifier EmailNotifier) smtp.Auth { + oldSendMail := sendMail + defer func() { + sendMail = oldSendMail + }() + + var passedAuth smtp.Auth + + sendMail = func(addr string, a smtp.Auth, from string, to []string, msg []byte) error { + passedAuth = a + return 
nil + } + + alerts := make(Messages, 0) + ret := notifier.Notify(alerts) + + if !ret { + t.Error("Notify must return true") + } + + return passedAuth +}