From 8cf377e307555f2eceeed6bf65a9e178e27e8f81 Mon Sep 17 00:00:00 2001 From: Anubhav Mishra Date: Mon, 30 Oct 2017 21:22:22 -0700 Subject: [PATCH 1/2] adding vendor directory --- Gopkg.lock | 10 +- .../github.com/apparentlymart/go-cidr/LICENSE | 19 + .../apparentlymart/go-cidr/cidr/cidr.go | 112 + .../apparentlymart/go-cidr/cidr/wrangling.go | 38 + vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 + vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 + .../aws/aws-sdk-go/aws/awserr/error.go | 145 + .../aws/aws-sdk-go/aws/awserr/types.go | 194 + .../aws/aws-sdk-go/aws/awsutil/copy.go | 108 + .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 + .../aws/aws-sdk-go/aws/awsutil/path_value.go | 222 + .../aws/aws-sdk-go/aws/awsutil/prettify.go | 113 + .../aws-sdk-go/aws/awsutil/string_value.go | 89 + .../aws/aws-sdk-go/aws/client/client.go | 90 + .../aws-sdk-go/aws/client/default_retryer.go | 96 + .../aws/client/metadata/client_info.go | 12 + .../github.com/aws/aws-sdk-go/aws/config.go | 470 + .../aws/aws-sdk-go/aws/convert_types.go | 369 + .../aws-sdk-go/aws/corehandlers/handlers.go | 226 + .../aws/corehandlers/param_validator.go | 17 + .../aws/credentials/chain_provider.go | 102 + .../aws-sdk-go/aws/credentials/credentials.go | 246 + .../ec2rolecreds/ec2_role_provider.go | 178 + .../aws/credentials/endpointcreds/provider.go | 191 + .../aws/credentials/env_provider.go | 78 + .../aws-sdk-go/aws/credentials/example.ini | 12 + .../shared_credentials_provider.go | 151 + .../aws/credentials/static_provider.go | 57 + .../stscreds/assume_role_provider.go | 298 + .../aws/aws-sdk-go/aws/defaults/defaults.go | 163 + .../aws/aws-sdk-go/aws/ec2metadata/api.go | 162 + .../aws/aws-sdk-go/aws/ec2metadata/service.go | 124 + .../github.com/aws/aws-sdk-go/aws/errors.go | 17 + .../github.com/aws/aws-sdk-go/aws/logger.go | 112 + .../aws/aws-sdk-go/aws/request/handlers.go | 225 + .../aws-sdk-go/aws/request/http_request.go | 24 + .../aws-sdk-go/aws/request/offset_reader.go | 58 + .../aws/aws-sdk-go/aws/request/request.go | 575 + .../aws/request/request_pagination.go | 236 + .../aws/aws-sdk-go/aws/request/retryer.go | 154 + .../aws/aws-sdk-go/aws/request/validation.go | 234 + .../aws/aws-sdk-go/aws/session/doc.go | 273 + .../aws/aws-sdk-go/aws/session/env_config.go | 208 + .../aws/aws-sdk-go/aws/session/session.go | 590 + .../aws-sdk-go/aws/session/shared_config.go | 295 + .../aws-sdk-go/aws/signer/v4/header_rules.go | 82 + .../aws/aws-sdk-go/aws/signer/v4/v4.go | 761 + vendor/github.com/aws/aws-sdk-go/aws/types.go | 118 + .../github.com/aws/aws-sdk-go/aws/version.go | 8 + .../private/protocol/idempotency.go | 75 + .../private/protocol/query/build.go | 36 + .../protocol/query/queryutil/queryutil.go | 237 + .../private/protocol/query/unmarshal.go | 35 + .../private/protocol/query/unmarshal_error.go | 66 + .../aws-sdk-go/private/protocol/rest/build.go | 290 + .../private/protocol/rest/payload.go | 45 + .../private/protocol/rest/unmarshal.go | 227 + .../private/protocol/restxml/restxml.go | 69 + .../aws-sdk-go/private/protocol/unmarshal.go | 21 + .../private/protocol/xml/xmlutil/build.go | 296 + .../private/protocol/xml/xmlutil/unmarshal.go | 260 + .../protocol/xml/xmlutil/xml_to_struct.go | 147 + .../aws/aws-sdk-go/service/s3/api.go | 19245 ++++++++++++++++ .../aws-sdk-go/service/s3/bucket_location.go | 106 + .../aws/aws-sdk-go/service/s3/content_md5.go | 36 + .../aws-sdk-go/service/s3/customizations.go | 46 + .../service/s3/host_style_bucket.go | 162 + .../service/s3/platform_handlers.go | 8 + 
.../service/s3/platform_handlers_go1.6.go | 28 + .../aws/aws-sdk-go/service/s3/service.go | 93 + .../aws/aws-sdk-go/service/s3/sse.go | 44 + .../aws-sdk-go/service/s3/statusok_error.go | 35 + .../aws-sdk-go/service/s3/unmarshal_error.go | 103 + .../aws/aws-sdk-go/service/s3/waiters.go | 214 + .../aws/aws-sdk-go/service/sts/api.go | 2365 ++ .../aws-sdk-go/service/sts/customizations.go | 12 + .../aws/aws-sdk-go/service/sts/service.go | 93 + vendor/github.com/bgentry/go-netrc/LICENSE | 20 + .../bgentry/go-netrc/netrc/netrc.go | 510 + vendor/github.com/go-ini/ini/LICENSE | 191 + vendor/github.com/go-ini/ini/README.md | 740 + vendor/github.com/go-ini/ini/README_ZH.md | 727 + vendor/github.com/go-ini/ini/ini.go | 556 + vendor/github.com/go-ini/ini/parser.go | 361 + vendor/github.com/go-ini/ini/struct.go | 450 + vendor/github.com/hashicorp/errwrap/LICENSE | 354 + vendor/github.com/hashicorp/errwrap/README.md | 89 + .../github.com/hashicorp/errwrap/errwrap.go | 169 + vendor/github.com/hashicorp/go-getter/LICENSE | 354 + .../github.com/hashicorp/go-getter/README.md | 257 + .../hashicorp/go-getter/appveyor.yml | 16 + .../github.com/hashicorp/go-getter/client.go | 335 + .../hashicorp/go-getter/client_mode.go | 24 + .../hashicorp/go-getter/copy_dir.go | 78 + .../hashicorp/go-getter/decompress.go | 29 + .../hashicorp/go-getter/decompress_bzip2.go | 45 + .../hashicorp/go-getter/decompress_gzip.go | 49 + .../hashicorp/go-getter/decompress_tbz2.go | 33 + .../hashicorp/go-getter/decompress_testing.go | 135 + .../hashicorp/go-getter/decompress_tgz.go | 39 + .../hashicorp/go-getter/decompress_zip.go | 96 + .../github.com/hashicorp/go-getter/detect.go | 97 + .../hashicorp/go-getter/detect_bitbucket.go | 66 + .../hashicorp/go-getter/detect_file.go | 67 + .../hashicorp/go-getter/detect_github.go | 73 + .../hashicorp/go-getter/detect_s3.go | 61 + .../hashicorp/go-getter/folder_storage.go | 65 + vendor/github.com/hashicorp/go-getter/get.go | 139 + .../hashicorp/go-getter/get_file.go | 32 + .../hashicorp/go-getter/get_file_unix.go | 103 + .../hashicorp/go-getter/get_file_windows.go | 120 + .../github.com/hashicorp/go-getter/get_git.go | 225 + .../github.com/hashicorp/go-getter/get_hg.go | 131 + .../hashicorp/go-getter/get_http.go | 227 + .../hashicorp/go-getter/get_mock.go | 52 + .../github.com/hashicorp/go-getter/get_s3.go | 243 + .../hashicorp/go-getter/helper/url/url.go | 14 + .../go-getter/helper/url/url_unix.go | 11 + .../go-getter/helper/url/url_windows.go | 40 + .../github.com/hashicorp/go-getter/netrc.go | 67 + .../github.com/hashicorp/go-getter/source.go | 36 + .../github.com/hashicorp/go-getter/storage.go | 13 + .../hashicorp/go-multierror/LICENSE | 353 + .../hashicorp/go-multierror/README.md | 97 + .../hashicorp/go-multierror/append.go | 41 + .../hashicorp/go-multierror/flatten.go | 26 + .../hashicorp/go-multierror/format.go | 27 + .../hashicorp/go-multierror/multierror.go | 51 + .../hashicorp/go-multierror/prefix.go | 37 + vendor/github.com/hashicorp/go-uuid/LICENSE | 363 + vendor/github.com/hashicorp/go-uuid/README.md | 8 + vendor/github.com/hashicorp/go-uuid/uuid.go | 65 + .../github.com/hashicorp/go-version/LICENSE | 354 + .../github.com/hashicorp/go-version/README.md | 65 + .../hashicorp/go-version/constraint.go | 178 + .../hashicorp/go-version/version.go | 322 + .../go-version/version_collection.go | 17 + vendor/github.com/hashicorp/hcl/LICENSE | 354 + vendor/github.com/hashicorp/hcl/Makefile | 18 + vendor/github.com/hashicorp/hcl/README.md | 125 + vendor/github.com/hashicorp/hcl/appveyor.yml | 19 
+ vendor/github.com/hashicorp/hcl/decoder.go | 724 + vendor/github.com/hashicorp/hcl/hcl.go | 11 + .../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 + .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 + .../hashicorp/hcl/hcl/parser/error.go | 17 + .../hashicorp/hcl/hcl/parser/parser.go | 520 + .../hashicorp/hcl/hcl/scanner/scanner.go | 651 + .../hashicorp/hcl/hcl/strconv/quote.go | 241 + .../hashicorp/hcl/hcl/token/position.go | 46 + .../hashicorp/hcl/hcl/token/token.go | 219 + .../hashicorp/hcl/json/parser/flatten.go | 117 + .../hashicorp/hcl/json/parser/parser.go | 313 + .../hashicorp/hcl/json/scanner/scanner.go | 451 + .../hashicorp/hcl/json/token/position.go | 46 + .../hashicorp/hcl/json/token/token.go | 118 + vendor/github.com/hashicorp/hcl/lex.go | 38 + vendor/github.com/hashicorp/hcl/parse.go | 39 + vendor/github.com/hashicorp/hil/LICENSE | 353 + vendor/github.com/hashicorp/hil/README.md | 102 + vendor/github.com/hashicorp/hil/appveyor.yml | 18 + .../hashicorp/hil/ast/arithmetic.go | 43 + .../hashicorp/hil/ast/arithmetic_op.go | 24 + vendor/github.com/hashicorp/hil/ast/ast.go | 99 + vendor/github.com/hashicorp/hil/ast/call.go | 47 + vendor/github.com/hashicorp/hil/ast/index.go | 76 + .../github.com/hashicorp/hil/ast/literal.go | 88 + vendor/github.com/hashicorp/hil/ast/output.go | 78 + vendor/github.com/hashicorp/hil/ast/scope.go | 90 + vendor/github.com/hashicorp/hil/ast/stack.go | 25 + .../hashicorp/hil/ast/type_string.go | 54 + .../hashicorp/hil/ast/variable_access.go | 36 + .../hashicorp/hil/ast/variables_helper.go | 63 + vendor/github.com/hashicorp/hil/builtins.go | 331 + .../hashicorp/hil/check_identifier.go | 88 + .../github.com/hashicorp/hil/check_types.go | 668 + vendor/github.com/hashicorp/hil/convert.go | 159 + vendor/github.com/hashicorp/hil/eval.go | 472 + vendor/github.com/hashicorp/hil/eval_type.go | 16 + .../hashicorp/hil/evaltype_string.go | 42 + vendor/github.com/hashicorp/hil/parse.go | 29 + .../hashicorp/hil/transform_fixed.go | 29 + vendor/github.com/hashicorp/hil/walk.go | 266 + vendor/github.com/hashicorp/terraform/LICENSE | 354 + .../hashicorp/terraform/config/append.go | 86 + .../hashicorp/terraform/config/config.go | 1096 + .../terraform/config/config_string.go | 338 + .../hashicorp/terraform/config/config_tree.go | 43 + .../hashicorp/terraform/config/import_tree.go | 113 + .../hashicorp/terraform/config/interpolate.go | 386 + .../terraform/config/interpolate_funcs.go | 1390 ++ .../terraform/config/interpolate_walk.go | 283 + .../hashicorp/terraform/config/lang.go | 11 + .../hashicorp/terraform/config/loader.go | 224 + .../hashicorp/terraform/config/loader_hcl.go | 1130 + .../hashicorp/terraform/config/merge.go | 193 + .../terraform/config/module/copy_dir.go | 114 + .../hashicorp/terraform/config/module/get.go | 71 + .../terraform/config/module/inode.go | 21 + .../terraform/config/module/inode_freebsd.go | 21 + .../terraform/config/module/inode_windows.go | 8 + .../terraform/config/module/module.go | 7 + .../hashicorp/terraform/config/module/tree.go | 428 + .../terraform/config/module/tree_gob.go | 57 + .../hashicorp/terraform/config/raw_config.go | 335 + .../terraform/config/resource_mode.go | 9 + .../terraform/config/resource_mode_string.go | 16 + .../github.com/hashicorp/terraform/dag/dag.go | 286 + .../hashicorp/terraform/dag/edge.go | 37 + .../hashicorp/terraform/dag/graph.go | 391 + .../github.com/hashicorp/terraform/dag/set.go | 123 + .../hashicorp/terraform/dag/tarjan.go | 107 + .../hashicorp/terraform/flatmap/expand.go | 147 + 
.../hashicorp/terraform/flatmap/flatten.go | 71 + .../hashicorp/terraform/flatmap/map.go | 82 + .../helper/hilmapstructure/hilmapstructure.go | 41 + .../hashicorp/terraform/terraform/context.go | 1022 + .../terraform/terraform/context_import.go | 77 + .../hashicorp/terraform/terraform/diff.go | 866 + .../hashicorp/terraform/terraform/eval.go | 63 + .../terraform/terraform/eval_apply.go | 359 + .../terraform/eval_check_prevent_destroy.go | 38 + .../terraform/terraform/eval_context.go | 84 + .../terraform/eval_context_builtin.go | 347 + .../terraform/terraform/eval_context_mock.go | 208 + .../terraform/terraform/eval_count.go | 58 + .../terraform/terraform/eval_diff.go | 478 + .../terraform/terraform/eval_error.go | 20 + .../terraform/terraform/eval_filter.go | 25 + .../terraform/eval_filter_operation.go | 49 + .../hashicorp/terraform/terraform/eval_if.go | 26 + .../terraform/terraform/eval_import_state.go | 76 + .../terraform/terraform/eval_interpolate.go | 24 + .../terraform/terraform/eval_noop.go | 8 + .../terraform/terraform/eval_output.go | 119 + .../terraform/terraform/eval_provider.go | 164 + .../terraform/terraform/eval_provisioner.go | 47 + .../terraform/terraform/eval_read_data.go | 139 + .../terraform/terraform/eval_refresh.go | 55 + .../terraform/terraform/eval_resource.go | 13 + .../terraform/terraform/eval_sequence.go | 27 + .../terraform/terraform/eval_state.go | 324 + .../terraform/terraform/eval_validate.go | 227 + .../terraform/terraform/eval_variable.go | 279 + .../terraform/terraform/evaltree_provider.go | 119 + .../hashicorp/terraform/terraform/graph.go | 172 + .../terraform/terraform/graph_builder.go | 77 + .../terraform/graph_builder_import.go | 76 + .../terraform/terraform/graph_dot.go | 9 + .../terraform/graph_interface_subgraph.go | 7 + .../terraform/terraform/graph_walk.go | 60 + .../terraform/terraform/graph_walk_context.go | 157 + .../terraform/graph_walk_operation.go | 18 + .../hashicorp/terraform/terraform/hook.go | 137 + .../terraform/terraform/hook_mock.go | 245 + .../terraform/terraform/hook_stop.go | 87 + .../terraform/terraform/instancetype.go | 13 + .../terraform/instancetype_string.go | 16 + .../terraform/terraform/interpolate.go | 782 + .../hashicorp/terraform/terraform/path.go | 24 + .../hashicorp/terraform/terraform/plan.go | 153 + .../hashicorp/terraform/terraform/resource.go | 360 + .../terraform/terraform/resource_address.go | 301 + .../terraform/terraform/resource_provider.go | 204 + .../terraform/resource_provider_mock.go | 297 + .../terraform/resource_provisioner.go | 54 + .../terraform/resource_provisioner_mock.go | 72 + .../terraform/terraform/semantics.go | 132 + .../hashicorp/terraform/terraform/state.go | 2118 ++ .../terraform/terraform/state_add.go | 374 + .../terraform/terraform/state_filter.go | 267 + .../terraform/state_upgrade_v1_to_v2.go | 189 + .../terraform/state_upgrade_v2_to_v3.go | 142 + .../hashicorp/terraform/terraform/state_v1.go | 145 + .../terraform/terraform/transform.go | 52 + .../terraform/terraform/transform_config.go | 135 + .../terraform/terraform/transform_deposed.go | 168 + .../terraform/terraform/transform_expand.go | 48 + .../terraform/transform_import_state.go | 241 + .../terraform/terraform/transform_output.go | 59 + .../terraform/terraform/transform_provider.go | 380 + .../terraform/transform_provisioner.go | 206 + .../terraform/terraform/transform_root.go | 38 + .../terraform/terraform/transform_targets.go | 219 + .../transform_transitive_reduction.go | 20 + .../terraform/terraform/transform_vertex.go | 44 + 
.../hashicorp/terraform/terraform/ui_input.go | 26 + .../terraform/terraform/ui_input_mock.go | 23 + .../terraform/terraform/ui_input_prefix.go | 19 + .../terraform/terraform/ui_output.go | 7 + .../terraform/terraform/ui_output_callback.go | 9 + .../terraform/terraform/ui_output_mock.go | 16 + .../terraform/ui_output_provisioner.go | 15 + .../hashicorp/terraform/terraform/util.go | 93 + .../terraform/terraform/variables.go | 166 + .../hashicorp/terraform/terraform/version.go | 31 + .../terraform/walkoperation_string.go | 16 + .../github.com/jmespath/go-jmespath/LICENSE | 13 + .../github.com/jmespath/go-jmespath/Makefile | 44 + .../github.com/jmespath/go-jmespath/README.md | 7 + vendor/github.com/jmespath/go-jmespath/api.go | 12 + .../go-jmespath/astnodetype_string.go | 16 + .../jmespath/go-jmespath/functions.go | 840 + .../jmespath/go-jmespath/interpreter.go | 418 + .../github.com/jmespath/go-jmespath/lexer.go | 420 + .../github.com/jmespath/go-jmespath/parser.go | 603 + .../jmespath/go-jmespath/toktype_string.go | 16 + .../github.com/jmespath/go-jmespath/util.go | 185 + .../mitchellh/copystructure/LICENSE | 21 + .../mitchellh/copystructure/README.md | 21 + .../mitchellh/copystructure/copier_time.go | 15 + .../mitchellh/copystructure/copystructure.go | 545 + .../github.com/mitchellh/go-homedir/LICENSE | 21 + .../github.com/mitchellh/go-homedir/README.md | 14 + .../mitchellh/go-homedir/homedir.go | 137 + .../github.com/mitchellh/mapstructure/LICENSE | 21 + .../mitchellh/mapstructure/README.md | 46 + .../mitchellh/mapstructure/decode_hooks.go | 158 + .../mitchellh/mapstructure/error.go | 50 + .../mitchellh/mapstructure/mapstructure.go | 828 + .../github.com/mitchellh/reflectwalk/LICENSE | 21 + .../mitchellh/reflectwalk/README.md | 6 + .../mitchellh/reflectwalk/location.go | 19 + .../mitchellh/reflectwalk/location_string.go | 16 + .../mitchellh/reflectwalk/reflectwalk.go | 401 + vendor/github.com/satori/go.uuid/LICENSE | 20 + vendor/github.com/satori/go.uuid/README.md | 65 + vendor/github.com/satori/go.uuid/uuid.go | 488 + 328 files changed, 77430 insertions(+), 1 deletion(-) create mode 100644 vendor/github.com/apparentlymart/go-cidr/LICENSE create mode 100644 vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go create mode 100644 vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go create mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/logger.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/validation.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/version.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/api.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/sse.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/service.go create mode 100644 vendor/github.com/bgentry/go-netrc/LICENSE create mode 100644 vendor/github.com/bgentry/go-netrc/netrc/netrc.go create mode 100644 vendor/github.com/go-ini/ini/LICENSE create mode 100644 vendor/github.com/go-ini/ini/README.md create mode 100644 vendor/github.com/go-ini/ini/README_ZH.md create mode 100644 vendor/github.com/go-ini/ini/ini.go create mode 100644 vendor/github.com/go-ini/ini/parser.go create mode 100644 vendor/github.com/go-ini/ini/struct.go create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE create mode 100644 vendor/github.com/hashicorp/errwrap/README.md create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go create mode 100644 vendor/github.com/hashicorp/go-getter/LICENSE create mode 100644 vendor/github.com/hashicorp/go-getter/README.md create mode 100644 vendor/github.com/hashicorp/go-getter/appveyor.yml create mode 100644 vendor/github.com/hashicorp/go-getter/client.go create mode 100644 vendor/github.com/hashicorp/go-getter/client_mode.go create mode 100644 vendor/github.com/hashicorp/go-getter/copy_dir.go create mode 100644 vendor/github.com/hashicorp/go-getter/decompress.go create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_bzip2.go create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_gzip.go create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_tbz2.go create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_testing.go create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_tgz.go create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_zip.go create mode 100644 vendor/github.com/hashicorp/go-getter/detect.go create mode 100644 vendor/github.com/hashicorp/go-getter/detect_bitbucket.go create mode 100644 vendor/github.com/hashicorp/go-getter/detect_file.go create mode 100644 vendor/github.com/hashicorp/go-getter/detect_github.go create mode 100644 vendor/github.com/hashicorp/go-getter/detect_s3.go create mode 100644 vendor/github.com/hashicorp/go-getter/folder_storage.go create mode 100644 vendor/github.com/hashicorp/go-getter/get.go create mode 100644 vendor/github.com/hashicorp/go-getter/get_file.go create mode 100644 vendor/github.com/hashicorp/go-getter/get_file_unix.go create mode 100644 vendor/github.com/hashicorp/go-getter/get_file_windows.go create mode 100644 vendor/github.com/hashicorp/go-getter/get_git.go create mode 100644 vendor/github.com/hashicorp/go-getter/get_hg.go 
create mode 100644 vendor/github.com/hashicorp/go-getter/get_http.go create mode 100644 vendor/github.com/hashicorp/go-getter/get_mock.go create mode 100644 vendor/github.com/hashicorp/go-getter/get_s3.go create mode 100644 vendor/github.com/hashicorp/go-getter/helper/url/url.go create mode 100644 vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go create mode 100644 vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go create mode 100644 vendor/github.com/hashicorp/go-getter/netrc.go create mode 100644 vendor/github.com/hashicorp/go-getter/source.go create mode 100644 vendor/github.com/hashicorp/go-getter/storage.go create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go create mode 100644 vendor/github.com/hashicorp/go-uuid/LICENSE create mode 100644 vendor/github.com/hashicorp/go-uuid/README.md create mode 100644 vendor/github.com/hashicorp/go-uuid/uuid.go create mode 100644 vendor/github.com/hashicorp/go-version/LICENSE create mode 100644 vendor/github.com/hashicorp/go-version/README.md create mode 100644 vendor/github.com/hashicorp/go-version/constraint.go create mode 100644 vendor/github.com/hashicorp/go-version/version.go create mode 100644 vendor/github.com/hashicorp/go-version/version_collection.go create mode 100644 vendor/github.com/hashicorp/hcl/LICENSE create mode 100644 vendor/github.com/hashicorp/hcl/Makefile create mode 100644 vendor/github.com/hashicorp/hcl/README.md create mode 100644 vendor/github.com/hashicorp/hcl/appveyor.yml create mode 100644 vendor/github.com/hashicorp/hcl/decoder.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/position.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/flatten.go create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go create mode 100644 vendor/github.com/hashicorp/hcl/json/token/position.go create mode 100644 vendor/github.com/hashicorp/hcl/json/token/token.go create mode 100644 vendor/github.com/hashicorp/hcl/lex.go create mode 100644 vendor/github.com/hashicorp/hcl/parse.go create mode 100644 vendor/github.com/hashicorp/hil/LICENSE create mode 100644 vendor/github.com/hashicorp/hil/README.md create mode 100644 vendor/github.com/hashicorp/hil/appveyor.yml create mode 100644 vendor/github.com/hashicorp/hil/ast/arithmetic.go create mode 100644 vendor/github.com/hashicorp/hil/ast/arithmetic_op.go create mode 100644 vendor/github.com/hashicorp/hil/ast/ast.go create mode 100644 vendor/github.com/hashicorp/hil/ast/call.go 
create mode 100644 vendor/github.com/hashicorp/hil/ast/index.go create mode 100644 vendor/github.com/hashicorp/hil/ast/literal.go create mode 100644 vendor/github.com/hashicorp/hil/ast/output.go create mode 100644 vendor/github.com/hashicorp/hil/ast/scope.go create mode 100644 vendor/github.com/hashicorp/hil/ast/stack.go create mode 100644 vendor/github.com/hashicorp/hil/ast/type_string.go create mode 100644 vendor/github.com/hashicorp/hil/ast/variable_access.go create mode 100644 vendor/github.com/hashicorp/hil/ast/variables_helper.go create mode 100644 vendor/github.com/hashicorp/hil/builtins.go create mode 100644 vendor/github.com/hashicorp/hil/check_identifier.go create mode 100644 vendor/github.com/hashicorp/hil/check_types.go create mode 100644 vendor/github.com/hashicorp/hil/convert.go create mode 100644 vendor/github.com/hashicorp/hil/eval.go create mode 100644 vendor/github.com/hashicorp/hil/eval_type.go create mode 100644 vendor/github.com/hashicorp/hil/evaltype_string.go create mode 100644 vendor/github.com/hashicorp/hil/parse.go create mode 100644 vendor/github.com/hashicorp/hil/transform_fixed.go create mode 100644 vendor/github.com/hashicorp/hil/walk.go create mode 100644 vendor/github.com/hashicorp/terraform/LICENSE create mode 100644 vendor/github.com/hashicorp/terraform/config/append.go create mode 100644 vendor/github.com/hashicorp/terraform/config/config.go create mode 100644 vendor/github.com/hashicorp/terraform/config/config_string.go create mode 100644 vendor/github.com/hashicorp/terraform/config/config_tree.go create mode 100644 vendor/github.com/hashicorp/terraform/config/import_tree.go create mode 100644 vendor/github.com/hashicorp/terraform/config/interpolate.go create mode 100644 vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go create mode 100644 vendor/github.com/hashicorp/terraform/config/interpolate_walk.go create mode 100644 vendor/github.com/hashicorp/terraform/config/lang.go create mode 100644 vendor/github.com/hashicorp/terraform/config/loader.go create mode 100644 vendor/github.com/hashicorp/terraform/config/loader_hcl.go create mode 100644 vendor/github.com/hashicorp/terraform/config/merge.go create mode 100644 vendor/github.com/hashicorp/terraform/config/module/copy_dir.go create mode 100644 vendor/github.com/hashicorp/terraform/config/module/get.go create mode 100644 vendor/github.com/hashicorp/terraform/config/module/inode.go create mode 100644 vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go create mode 100644 vendor/github.com/hashicorp/terraform/config/module/inode_windows.go create mode 100644 vendor/github.com/hashicorp/terraform/config/module/module.go create mode 100644 vendor/github.com/hashicorp/terraform/config/module/tree.go create mode 100644 vendor/github.com/hashicorp/terraform/config/module/tree_gob.go create mode 100644 vendor/github.com/hashicorp/terraform/config/raw_config.go create mode 100644 vendor/github.com/hashicorp/terraform/config/resource_mode.go create mode 100644 vendor/github.com/hashicorp/terraform/config/resource_mode_string.go create mode 100644 vendor/github.com/hashicorp/terraform/dag/dag.go create mode 100644 vendor/github.com/hashicorp/terraform/dag/edge.go create mode 100644 vendor/github.com/hashicorp/terraform/dag/graph.go create mode 100644 vendor/github.com/hashicorp/terraform/dag/set.go create mode 100644 vendor/github.com/hashicorp/terraform/dag/tarjan.go create mode 100644 vendor/github.com/hashicorp/terraform/flatmap/expand.go create mode 100644 
vendor/github.com/hashicorp/terraform/flatmap/flatten.go create mode 100644 vendor/github.com/hashicorp/terraform/flatmap/map.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/context.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/context_import.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/diff.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_apply.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_context.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_count.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_diff.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_error.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_filter.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_if.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_noop.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_output.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_provider.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_resource.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_state.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_validate.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_variable.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_dot.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_walk.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/hook.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/hook_mock.go create mode 100644 
vendor/github.com/hashicorp/terraform/terraform/hook_stop.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/instancetype.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/interpolate.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/path.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/plan.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_address.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_provider.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/semantics.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/state.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_add.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_filter.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_v1.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_config.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_expand.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_output.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_provider.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_root.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_targets.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_input.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_output.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/util.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/variables.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/version.go create mode 100644 
vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/LICENSE create mode 100644 vendor/github.com/jmespath/go-jmespath/Makefile create mode 100644 vendor/github.com/jmespath/go-jmespath/README.md create mode 100644 vendor/github.com/jmespath/go-jmespath/api.go create mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go create mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go create mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go create mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go create mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/util.go create mode 100644 vendor/github.com/mitchellh/copystructure/LICENSE create mode 100644 vendor/github.com/mitchellh/copystructure/README.md create mode 100644 vendor/github.com/mitchellh/copystructure/copier_time.go create mode 100644 vendor/github.com/mitchellh/copystructure/copystructure.go create mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE create mode 100644 vendor/github.com/mitchellh/go-homedir/README.md create mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go create mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE create mode 100644 vendor/github.com/mitchellh/mapstructure/README.md create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go create mode 100644 vendor/github.com/mitchellh/mapstructure/error.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/LICENSE create mode 100644 vendor/github.com/mitchellh/reflectwalk/README.md create mode 100644 vendor/github.com/mitchellh/reflectwalk/location.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/location_string.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/reflectwalk.go create mode 100644 vendor/github.com/satori/go.uuid/LICENSE create mode 100644 vendor/github.com/satori/go.uuid/README.md create mode 100644 vendor/github.com/satori/go.uuid/uuid.go diff --git a/Gopkg.lock b/Gopkg.lock index c447fec6..7cc11f94 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,4 +1,5 @@ -memo = "f30aaa96a2df1a4b87dbde2228e22622ad26ba0d2c1d1b0ba651bc641d542744" +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ [[projects]] branch = "master" @@ -119,3 +120,10 @@ memo = "f30aaa96a2df1a4b87dbde2228e22622ad26ba0d2c1d1b0ba651bc641d542744" packages = ["."] revision = "879c5887cd475cd7864858769793b2ceb0d44feb" version = "v1.1.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "2827910e476b549eceb1bca2db7b98f0ab3eb0e99f171e9c4a394e141cff6c45" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/apparentlymart/go-cidr/LICENSE b/vendor/github.com/apparentlymart/go-cidr/LICENSE new file mode 100644 index 00000000..21253788 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-cidr/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2015 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go new file mode 100644 index 00000000..a31cdec7 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go @@ -0,0 +1,112 @@ +// Package cidr is a collection of assorted utilities for computing +// network and host addresses within network ranges. +// +// It expects a CIDR-type address structure where addresses are divided into +// some number of prefix bits representing the network and then the remaining +// suffix bits represent the host. +// +// For example, it can help to calculate addresses for sub-networks of a +// parent network, or to calculate host addresses within a particular prefix. +// +// At present this package is prioritizing simplicity of implementation and +// de-prioritizing speed and memory usage. Thus caution is advised before +// using this package in performance-critical applications or hot codepaths. +// Patches to improve the speed and memory usage may be accepted as long as +// they do not result in a significant increase in code complexity. +package cidr + +import ( + "fmt" + "math/big" + "net" +) + +// Subnet takes a parent CIDR range and creates a subnet within it +// with the given number of additional prefix bits and the given +// network number. +// +// For example, 10.3.0.0/16, extended by 8 bits, with a network number +// of 5, becomes 10.3.5.0/24 . 
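+// +// A caller-side sketch of the example above (hypothetical values, shown as an +// importing package would write it; not part of the upstream file): +// +// _, base, _ := net.ParseCIDR("10.3.0.0/16") +// subnet, err := cidr.Subnet(base, 8, 5) // subnet.String() == "10.3.5.0/24" when err is nil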
+func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) { + ip := base.IP + mask := base.Mask + + parentLen, addrLen := mask.Size() + newPrefixLen := parentLen + newBits + + if newPrefixLen > addrLen { + return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits) + } + + maxNetNum := uint64(1<<uint64(newBits)) - 1 + if uint64(num) > maxNetNum { + return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num) + } + + return &net.IPNet{ + IP: insertNumIntoIP(ip, num, newPrefixLen), + Mask: net.CIDRMask(newPrefixLen, addrLen), + }, nil +} + +// Host takes a parent CIDR range and turns it into a host IP address with +// the given host number. +// +// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2. +func Host(base *net.IPNet, num int) (net.IP, error) { + ip := base.IP + mask := base.Mask + + parentLen, addrLen := mask.Size() + hostLen := addrLen - parentLen + + maxHostNum := uint64(1<<uint64(hostLen)) - 1 + if uint64(num) > maxHostNum { + return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num) + } + + return insertNumIntoIP(ip, num, 32), nil +} + +// AddressRange returns the first and last addresses in the given CIDR range. +func AddressRange(network *net.IPNet) (net.IP, net.IP) { + // the first IP is easy + firstIP := network.IP + + // the last IP is the network address OR NOT the mask address + prefixLen, bits := network.Mask.Size() + if prefixLen == bits { + // Easy! + // But make sure that our two slices are distinct, since they + // would be in all other cases. + lastIP := make([]byte, len(firstIP)) + copy(lastIP, firstIP) + return firstIP, lastIP + } + + firstIPInt, bits := ipToInt(firstIP) + hostLen := uint(bits) - uint(prefixLen) + lastIPInt := big.NewInt(1) + lastIPInt.Lsh(lastIPInt, hostLen) + lastIPInt.Sub(lastIPInt, big.NewInt(1)) + lastIPInt.Or(lastIPInt, firstIPInt) + + return firstIP, intToIP(lastIPInt, bits) +} + +// AddressCount returns the number of distinct host addresses within the given +// CIDR range. +// +// Since the result is a uint64, this function returns meaningful information +// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65. +func AddressCount(network *net.IPNet) uint64 { + prefixLen, bits := network.Mask.Size() + return 1 << (uint64(bits) - uint64(prefixLen)) +} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go new file mode 100644 index 00000000..861a5f62 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go @@ -0,0 +1,38 @@ +package cidr + +import ( + "fmt" + "math/big" + "net" +) + +func ipToInt(ip net.IP) (*big.Int, int) { + val := &big.Int{} + val.SetBytes([]byte(ip)) + if len(ip) == net.IPv4len { + return val, 32 + } else if len(ip) == net.IPv6len { + return val, 128 + } else { + panic(fmt.Errorf("Unsupported address length %d", len(ip))) + } +} + +func intToIP(ipInt *big.Int, bits int) net.IP { + ipBytes := ipInt.Bytes() + ret := make([]byte, bits/8) + // Pack our IP bytes into the end of the return array, + // since big.Int.Bytes() removes front zero padding.
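+ // (Illustration, assuming ipBytes fits in ret: a 4-byte value in a 16-byte IPv6 buffer lands in ret[12:16], leaving the leading bytes zero.)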
+ for i := 1; i <= len(ipBytes); i++ { + ret[len(ret)-i] = ipBytes[len(ipBytes)-i] + } + return net.IP(ret) +} + +func insertNumIntoIP(ip net.IP, num int, prefixLen int) net.IP { + ipInt, totalBits := ipToInt(ip) + bigNum := big.NewInt(int64(num)) + bigNum.Lsh(bigNum, uint(totalBits-prefixLen)) + ipInt.Or(ipInt, bigNum) + return intToIP(ipInt, totalBits) +} diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 00000000..5f14d116 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 00000000..56fdfc2b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,145 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. 
+ Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a new request error wrapper for the given Error +// provided. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 00000000..0202a008 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,194 @@ +package awserr + +import "fmt" + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. 
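+//
+// Illustrative sketch of the resulting format (an assumption derived from
+// the formatting logic below, not output captured from a real run):
+//
+//	awserr.SprintError("ResourceNotFound", "bucket missing", "check the name", err)
+//
+// would produce, when err is non-nil:
+//
+//	ResourceNotFound: bucket missing
+//		check the name
+//	caused by: <err.Error()>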
+func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. 
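+//
+// A hypothetical example of the resulting Error() string for a wrapped
+// service error (all values are illustrative only):
+//
+//	SomeServiceException: service failed
+//		status code: 400, request id: 9FEFFF118E15B86F
+//	caused by: <original error, if any>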
+func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 00000000..1a3d106d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,108 @@ +package awsutil + +import ( + "io" + "reflect" + "time" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. 
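+//
+// For reference, a minimal caller-side sketch of the exported Copy; the
+// widget type is a stand-in invented for illustration:
+//
+//	type widget struct{ Name *string }
+//
+//	src := widget{Name: aws.String("a")}
+//	var dst widget
+//	awsutil.Copy(&dst, &src) // dst.Name now points at a fresh copy of "a"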
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + dst.Set(reflect.New(e)) + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 00000000..59fa4a55 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type the are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 00000000..11c52c38 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,222 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
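+//
+// The path grammar, as implemented below: components are separated by ".",
+// "||" separates alternative paths tried left to right, "*" matches every
+// struct field, and a trailing "[i]" indexes a slice ("[-1]" counts from
+// the end, "[]" expands every element). For example, with a hypothetical
+// structure:
+//
+//	rValuesAtPath(v, "Records[].Name || Items[0].Name", false, false, false)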
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
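+//
+// A usage sketch (the output type and path are assumptions for
+// illustration; the path is evaluated as a JMESPath expression by the
+// go-jmespath library used below):
+//
+//	type output struct{ Buckets []struct{ Name string } }
+//
+//	names, err := awsutil.ValuesAtPath(out, "Buckets[].Name")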
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 00000000..710eb432 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,113 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
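+//
+// Illustrative only (assumed input; the output shape follows the logic
+// below, which indents nested values and quotes strings):
+//
+//	awsutil.Prettify(&s3.GetObjectInput{Bucket: aws.String("b"), Key: aws.String("k")})
+//
+// yields something like:
+//
+//	{
+//	  Bucket: "b",
+//	  Key: "k"
+//	}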
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + strtype := v.Type().String() + if strtype == "[]uint8" { + fmt.Fprintf(buf, " len %d", v.Len()) + break + } + + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + if !v.IsValid() { + fmt.Fprint(buf, "") + return + } + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 00000000..b6432f1a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,89 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. 
+func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 00000000..788fe6e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,90 @@ +package client + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint string + SigningRegion string + SigningName string +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not +// resolve the endpoint automatically. The service client's endpoint must be +// provided via the aws.Config.Endpoint field. +type ConfigNoResolveEndpointProvider interface { + ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. 
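+//
+// Service packages construct their clients through this function; a
+// condensed, illustrative sketch of such a call (field values are
+// assumptions):
+//
+//	svc := client.New(*cfg.Config,
+//		metadata.ClientInfo{ServiceName: "s3", SigningRegion: cfg.SigningRegion},
+//		cfg.Handlers)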
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+	svc := &Client{
+		Config:     cfg,
+		ClientInfo: info,
+		Handlers:   handlers.Copy(),
+	}
+
+	switch retryer, ok := cfg.Retryer.(request.Retryer); {
+	case ok:
+		svc.Retryer = retryer
+	case cfg.Retryer != nil && cfg.Logger != nil:
+		s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+		cfg.Logger.Log(s)
+		fallthrough
+	default:
+		maxRetries := aws.IntValue(cfg.MaxRetries)
+		if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+			maxRetries = 3
+		}
+		svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+	}
+
+	svc.AddDebugHandlers()
+
+	for _, option := range options {
+		option(svc)
+	}
+
+	return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+	return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+	if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
+		return
+	}
+
+	c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest})
+	c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 00000000..1313478f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,96 @@
+package client
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// request.Retryer interface or create a structure type that composes this
+// struct and overrides the specific methods. For example, to override only
+// the MaxRetries method:
+//
+//	type retryer struct {
+//	    client.DefaultRetryer
+//	}
+//
+//	// This implementation always has 100 max retries
+//	func (d retryer) MaxRetries() int { return 100 }
+type DefaultRetryer struct {
+	NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries that will be attempted
+// for an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+	// Set the upper limit of delay in retrying at ~five minutes
+	minTime := 30
+	throttle := d.shouldThrottle(r)
+	if throttle {
+		minTime = 500
+	}
+
+	retryCount := r.RetryCount
+	if retryCount > 13 {
+		retryCount = 13
+	} else if throttle && retryCount > 8 {
+		retryCount = 8
+	}
+
+	delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
+	return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns true if the request should be retried.
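+//
+// When ShouldRetry reports true, RetryRules above supplies the backoff. A
+// worked example of that computation: with r.RetryCount == 2 and a
+// non-throttle error (minTime 30), (1 << 2) * (seededRand.Intn(30) + 30)
+// yields a delay between 120ms and 236ms.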
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + + if r.HTTPResponse.StatusCode >= 500 { + return true + } + return r.IsErrorRetryable() || d.shouldThrottle(r) +} + +// ShouldThrottle returns true if the request should be throttled. +func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { + if r.HTTPResponse.StatusCode == 502 || + r.HTTPResponse.StatusCode == 503 || + r.HTTPResponse.StatusCode == 504 { + return true + } + return r.IsErrorThrottle() +} + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 00000000..4778056d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,12 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + APIVersion string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 00000000..d1f31f1c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,470 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the defaults.DefaultConfig tructure. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. + // + // @note You must still provide a `Region` value when specifying an + // endpoint for a client. 
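+	//
+	// For example, pointing the S3 client at a local S3-compatible
+	// endpoint (the URL here is an assumption for illustration):
+	//
+	//	svc := s3.New(sess, &aws.Config{
+	//		Region:   aws.String("us-east-1"),
+	//		Endpoint: aws.String("http://localhost:9000"),
+	//	})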
+ Endpoint *string + + // The resolver to use for looking up endpoints for AWS service clients + // to use based on region. + EndpointResolver endpoints.Resolver + + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // @see http://docs.aws.amazon.com/general/latest/gr/rande.html + // AWS Regions and Endpoints + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service + // specific configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the request.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client + // will use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // @note This configuration option is specific to the Amazon S3 service. + // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` + // header to PUT requests over 2MB of content. 100-Continue instructs the + // HTTP client not to send the body until the service responds with a + // `continue` status. This is useful to prevent sending the request body + // until after the request is authenticated, and validated. 
+	//
+	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+	//
+	// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+	// `ExpectContinueTimeout` for information on adjusting the continue wait
+	// timeout. https://golang.org/pkg/net/http/#Transport
+	//
+	// You should use this flag to disable 100-Continue if you experience issues
+	// with proxies or third party S3 compatible services.
+	S3Disable100Continue *bool
+
+	// Set this to `true` to enable the S3 Accelerate feature. All operations
+	// compatible with S3 Accelerate will use the accelerate endpoint for
+	// requests. Requests not compatible will fall back to normal S3 requests.
+	//
+	// The bucket must be enabled for accelerate before it can be used with an
+	// S3 client that has accelerate enabled. If the bucket is not enabled for
+	// accelerate an error will be returned. The bucket name must also be DNS
+	// compatible to work with accelerate.
+	S3UseAccelerate *bool
+
+	// Set this to `true` to disable the EC2Metadata client from overriding the
+	// default http.Client's Timeout. This is helpful if you do not want the
+	// EC2Metadata client to create a new http.Client. This option is only
+	// meaningful if you're not already using a custom HTTP client with the
+	// SDK. Enabled by default.
+	//
+	// Must be set and provided to the session.NewSession() in order to disable
+	// the EC2Metadata overriding the timeout for the default credentials chain.
+	//
+	// Example:
+	//	sess := session.Must(session.NewSession(aws.NewConfig()
+	//		.WithEC2MetadataDisableTimeoutOverride(true)))
+	//
+	//	svc := s3.New(sess)
+	//
+	EC2MetadataDisableTimeoutOverride *bool
+
+	// Instructs the endpoint generated for a service client to be the dual
+	// stack endpoint. The dual stack endpoint will support both IPv4 and
+	// IPv6 addressing.
+	//
+	// Setting this for a service which does not support dual stack will fail
+	// to make requests. It is not recommended to set this value on the session
+	// as it will apply to all service clients created with the session, even
+	// services which don't support dual stack endpoints.
+	//
+	// If the Endpoint config value is also provided the UseDualStack flag
+	// will be ignored.
+	//
+	// Example:
+	//	sess := session.Must(session.NewSession())
+	//
+	//	svc := s3.New(sess, &aws.Config{
+	//		UseDualStack: aws.Bool(true),
+	//	})
+	UseDualStack *bool
+
+	// SleepDelay is an override for the func the SDK will call when sleeping
+	// during the lifecycle of a request. Specifically this will be used for
+	// request delays. This value should only be used for testing. To adjust
+	// the delay of a request see the aws/client.DefaultRetryer and
+	// aws/request.Retryer.
+	//
+	// SleepDelay will prevent any Context from being used for canceling the
+	// retry delay of an API operation. It is recommended not to use SleepDelay
+	// at all and to specify a Retryer instead.
+	SleepDelay func(time.Duration)
+
+	// DisableRestProtocolURICleaning will not clean the URL path when making
+	// rest protocol requests. Will default to false. This would only be used
+	// for empty directory names in s3 requests.
+ // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. 
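+//
+// Like the other With* methods, this is designed for chaining; for example
+// (values are illustrative):
+//
+//	cfg := aws.NewConfig().
+//		WithRegion("us-west-2").
+//		WithS3ForcePathStyle(true).
+//		WithMaxRetries(5)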
+func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. +func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. +func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } + + if other.DisableRestProtocolURICleaning != nil { + dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning + } + + if other.EnforceShouldRetryCheck != nil { + dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck + } +} + +// Copy will return a shallow copy of the Config object. 
If any additional +// configurations are provided they will be merged into the new config returned. +func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 00000000..3b73a7da --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,369 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
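+//
+// These helpers pair up for safe round trips through the SDK's pointer
+// fields, e.g.:
+//
+//	p := aws.Int(42)       // *int pointing at 42
+//	n := aws.IntValue(p)   // 42
+//	z := aws.IntValue(nil) // 0, without a nil dereference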
+func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+	dst := make([]*float64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+	dst := make([]float64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+	dst := make(map[string]*float64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+	dst := make(map[string]float64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+	if v != nil {
+		return *v
+	}
+	return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64,
+// which includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs such as CloudWatch Logs which require
+// their Unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
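+//
+// Worked example: time.Unix(1, 500000000) has UnixNano() == 1500000000,
+// so TimeUnixMilli returns 1500.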
+func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 00000000..25b461c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,226 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "runtime" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +// +// The Content-Length will only be added to the request if the length of the body +// is greater than 0. If the body is empty or the current `Content-Length` +// header is <= 0, the header will also be stripped. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.BodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.BodyStart, 0) // make sure to seek back to original location + length = end - r.BodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. 
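+//
+// Editorial sketch: handlers such as this one are attached to a request
+// handler list by name; this mirrors what the SDK's defaults package does
+// when building the default handler lists.
+//
+//     var handlers request.Handlers
+//     handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)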
+var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// ValidateReqSigHandler is a request handler to ensure that the request's +// signature doesn't expire before it is sent. This can happen when a request +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. +var ValidateReqSigHandler = request.NamedHandler{ + Name: "core.ValidateReqSigHandler", + Fn: func(r *request.Request) { + // Unsigned requests are not signed + if r.Config.Credentials == credentials.AnonymousCredentials { + return + } + + signedTime := r.Time + if !r.LastSignedAt.IsZero() { + signedTime = r.LastSignedAt + } + + // 10 minutes to allow for some clock skew/delays in transmission. + // Would be improved with aws/aws-sdk-go#423 + if signedTime.Add(10 * time.Minute).After(time.Now()) { + return + } + + fmt.Println("request expired, resigning") + r.Sign() + }, +} + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects + } + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. 
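+//
+// Editorial note: this handler only flags responses with a status code of 0
+// or >= 300 using a generic "UnknownError"; protocol-specific UnmarshalError
+// handlers are expected to replace it with a concrete error. Registration
+// sketch, mirroring the defaults package:
+//
+//     handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)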
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 00000000..7d50b155 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 00000000..f298d659 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,102 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. 
For verbose error messaging set
+	// aws.Config.CredentialsChainVerboseErrors to true.
+	//
+	// @readonly
+	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+		`no valid providers in chain. Deprecated.
+	For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+		nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using the priority order of the
+// Providers in the list.
+//
+// If none of the Providers retrieve a valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns a valid credentials Value, ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of a ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none, ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials, ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain.
+//
+//     creds := credentials.NewChainCredentials(
+//         []credentials.Provider{
+//             &credentials.EnvProvider{},
+//             &ec2rolecreds.EC2RoleProvider{
+//                 Client: ec2metadata.New(sess),
+//             },
+//         })
+//
+//     // Usage of ChainCredentials with aws.Config
+//     svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+//       Credentials: creds,
+//     })))
+//
+type ChainProvider struct {
+    Providers     []Provider
+    curr          Provider
+    VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+    return NewCredentials(&ChainProvider{
+        Providers: append([]Provider{}, providers...),
+    })
+}
+
+// Retrieve returns the credentials value of the first provider in the chain
+// that returned without error, or an error if no provider did.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+    var errs []error
+    for _, p := range c.Providers {
+        creds, err := p.Retrieve()
+        if err == nil {
+            c.curr = p
+            return creds, nil
+        }
+        errs = append(errs, err)
+    }
+    c.curr = nil
+
+    var err error
+    err = ErrNoValidProvidersFoundInChain
+    if c.VerboseErrors {
+        err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+    }
+    return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+    if c.curr != nil {
+        return c.curr.IsExpired()
+    }
+
+    return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 00000000..42416fc2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,246 @@
+// Package credentials provides credential retrieval and management.
+//
+// The Credentials type is the primary method of getting access to and managing
+// credentials Values.
Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true. At that
+// point Credentials will call the Provider's Retrieve() to get a new
+// credential Value.
+//
+// The Provider is responsible for determining when the credentials Value has
+// expired. It is also important to note that Credentials will always call
+// Retrieve the first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//     creds := credentials.NewEnvCredentials()
+//
+//     // Retrieve the credentials value
+//     credValue, err := creds.Get()
+//     if err != nil {
+//         // handle error
+//     }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//     creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+//     creds.Expire()
+//     credsValue, err := creds.Get()
+//     // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer set up with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+//     type MyProvider struct{}
+//     func (m *MyProvider) Retrieve() (Value, error) {...}
+//     func (m *MyProvider) IsExpired() bool {...}
+//
+//     creds := credentials.NewCredentials(&MyProvider{})
+//     credValue, err := creds.Get()
+//
+package credentials
+
+import (
+    "sync"
+    "time"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// S3 buckets.
+//
+//     svc := s3.New(session.Must(session.NewSession(&aws.Config{
+//       Credentials: credentials.AnonymousCredentials,
+//     })))
+//     // Access public S3 buckets.
+//
+// @readonly
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+    // AWS Access key ID
+    AccessKeyID string
+
+    // AWS Secret Access Key
+    SecretAccessKey string
+
+    // AWS Session Token
+    SessionToken string
+
+    // Provider used to get credentials
+    ProviderName string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A Provider is required to manage its own Expired state and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+    // Retrieve returns nil if it successfully retrieved the value.
+    // An error is returned if the value was not obtainable, or was empty.
+    Retrieve() (Value, error)
+
+    // IsExpired returns if the credentials are no longer valid, and need
+    // to be retrieved.
+    IsExpired() bool
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an error.
+// It is used by the SDK when constructing a known provider is not possible
+// due to an error.
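+//
+// A hedged usage sketch (the error code and provider name below are
+// illustrative, not fixed values):
+//
+//     p := ErrorProvider{
+//         Err:          awserr.New("MyProviderError", "could not construct provider", nil),
+//         ProviderName: "MyProvider",
+//     }
+//     _, err := p.Retrieve() // always returns p.Err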
+type ErrorProvider struct { + // The error to be returned from Retrieve + Err error + + // The provider name to set on the Retrieved returned Value + ProviderName string +} + +// Retrieve will always return the error that the ErrorProvider was created with. +func (p ErrorProvider) Retrieve() (Value, error) { + return Value{ProviderName: p.ProviderName}, p.Err +} + +// IsExpired will always return not expired. +func (p ErrorProvider) IsExpired() bool { + return false +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + m sync.Mutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). 
+// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.Lock() + defer c.m.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 00000000..c3974952 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,178 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. 
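+//
+// Illustrative sketch (editorial; assumes a session and the aws/session,
+// ec2metadata, and time packages):
+//
+//     sess := session.Must(session.NewSession())
+//     creds := ec2rolecreds.NewCredentialsWithClient(
+//         ec2metadata.New(sess),
+//         func(p *ec2rolecreds.EC2RoleProvider) { p.ExpiryWindow = 30 * time.Second },
+//     )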
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + credsList, err := requestCredList(m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "/iam/security-credentials" + +// requestCredList requests a list of credentials from the EC2 service. +// If there are no credentials, or there is an error making or receiving the request +func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadata(iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New("SerializationError", + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. 
+ return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 00000000..a4cec5c5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,191 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. 
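+//
+// A minimal editorial sketch (the endpoint URL is hypothetical):
+//
+//     def := defaults.Get()
+//     p := endpointcreds.NewProviderClient(*def.Config, def.Handlers,
+//         "http://127.0.0.1:8080/latest/credentials")
+//     v, err := p.Retrieve()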
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
+    p := &Provider{
+        Client: client.New(
+            cfg,
+            metadata.ClientInfo{
+                ServiceName: "CredentialsEndpoint",
+                Endpoint:    endpoint,
+            },
+            handlers,
+        ),
+    }
+
+    p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
+    p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
+    p.Client.Handlers.Validate.Clear()
+    p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
+
+    for _, option := range options {
+        option(p)
+    }
+
+    return p
+}
+
+// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
+// from an arbitrary endpoint concurrently. The client will request the
+// credentials from the endpoint whenever they need to be refreshed.
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
+    return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *Provider) IsExpired() bool {
+    if p.staticCreds {
+        return false
+    }
+    return p.Expiry.IsExpired()
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+    resp, err := p.getCredentials()
+    if err != nil {
+        return credentials.Value{ProviderName: ProviderName},
+            awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+    }
+
+    if resp.Expiration != nil {
+        p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+    } else {
+        p.staticCreds = true
+    }
+
+    return credentials.Value{
+        AccessKeyID:     resp.AccessKeyID,
+        SecretAccessKey: resp.SecretAccessKey,
+        SessionToken:    resp.Token,
+        ProviderName:    ProviderName,
+    }, nil
+}
+
+type getCredentialsOutput struct {
+    Expiration      *time.Time
+    AccessKeyID     string
+    SecretAccessKey string
+    Token           string
+}
+
+type errorOutput struct {
+    Code    string `json:"code"`
+    Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
+    op := &request.Operation{
+        Name:       "GetCredentials",
+        HTTPMethod: "GET",
+    }
+
+    out := &getCredentialsOutput{}
+    req := p.Client.NewRequest(op, nil, out)
+    req.HTTPRequest.Header.Set("Accept", "application/json")
+
+    return out, req.Send()
+}
+
+func validateEndpointHandler(r *request.Request) {
+    if len(r.ClientInfo.Endpoint) == 0 {
+        r.Error = aws.ErrMissingEndpoint
+    }
+}
+
+func unmarshalHandler(r *request.Request) {
+    defer r.HTTPResponse.Body.Close()
+
+    out := r.Data.(*getCredentialsOutput)
+    if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
+        r.Error = awserr.New("SerializationError",
+            "failed to decode endpoint credentials",
+            err,
+        )
+    }
+}
+
+func unmarshalError(r *request.Request) {
+    defer r.HTTPResponse.Body.Close()
+
+    var errOut errorOutput
+    if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
+        r.Error = awserr.New("SerializationError",
+            "failed to decode endpoint credentials",
+            err,
+        )
+    }
+
+    // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 00000000..c14231a1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,78 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + // + // @readonly + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + // + // @readonly + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. 
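+//
+// Editorial example of the provider's behavior (key values are placeholders):
+//
+//     os.Setenv("AWS_ACCESS_KEY_ID", "AKID")
+//     os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET")
+//     creds := NewEnvCredentials()
+//     v, err := creds.Get() // v.AccessKeyID == "AKID", err == nil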
+func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 00000000..7fc91d9d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 00000000..7fb7cbf0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,151 @@ +package credentials + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/go-ini/ini" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + // + // @readonly + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. 
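+//
+// Editorial sketch tying this to the example.ini file above, via the public
+// API (assumes the file sits at the default shared credentials location):
+//
+//     creds := NewSharedCredentials("", "no_token")
+//     v, err := creds.Get() // v.SessionToken is empty for this profile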
+func loadProfile(filename, profile string) (Value, error) { + config, err := ini.Load(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) + } + + id, err := iniProfile.GetKey("aws_access_key_id") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + err) + } + + secret, err := iniProfile.GetKey("aws_secret_access_key") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.Key("aws_session_token") + + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if p.Filename == "" { + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { + return p.Filename, nil + } + + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = filepath.Join(homeDir, ".aws", "credentials") + } + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 00000000..4f5dab3f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,57 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + // + // @readonly + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set programmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object +// wrapping the static credentials value provide. 
Same as NewStaticCredentials,
+// but takes the creds Value instead of individual fields.
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+    return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+    if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+        return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+    }
+
+    if len(s.Value.ProviderName) == 0 {
+        s.Value.ProviderName = StaticProviderName
+    }
+    return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+    return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 00000000..4108e433
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,298 @@
+/*
+Package stscreds provides credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+    // Initial credentials loaded from SDK's default credential chain. Such as
+    // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+    // Role. These credentials will be used to make the STS Assume Role API.
+    sess := session.Must(session.NewSession())
+
+    // Create the credentials from AssumeRoleProvider to assume the role
+    // referenced by the "myRoleARN" ARN.
+    creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+    // Create service client value configured for credentials
+    // from assumed role.
+    svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with a MFA token you can either specify a MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short-lived operations that will not need to be refreshed, and when you do
+not need direct control over when the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+    // Create the credentials from AssumeRoleProvider to assume the role
+    // referenced by the "myRoleARN" ARN using the MFA token code provided.
+    creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+        p.SerialNumber = aws.String("myTokenSerialNumber")
+        p.TokenCode = aws.String("00000000")
+    })
+
+    // Create service client value configured for credentials
+    // from assumed role.
+    svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for a new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+    // Create the credentials from AssumeRoleProvider to assume the role
+    // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+    creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+        p.SerialNumber = aws.String("myTokenSerialNumber")
+        p.TokenProvider = stscreds.StdinTokenProvider
+    })
+
+    // Create service client value configured for credentials
+    // from assumed role.
+    svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
+package stscreds
+
+import (
+    "fmt"
+    "time"
+
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/awserr"
+    "github.com/aws/aws-sdk-go/aws/client"
+    "github.com/aws/aws-sdk-go/aws/credentials"
+    "github.com/aws/aws-sdk-go/service/sts"
+)
+
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on stdin.
+func StdinTokenProvider() (string, error) {
+    var v string
+    fmt.Printf("Assume Role MFA token code: ")
+    _, err := fmt.Scanln(&v)
+
+    return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+    AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time (15 minutes) that the
+// credentials will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+    credentials.Expiry
+
+    // STS client to make assume role request with.
+    Client AssumeRoler
+
+    // Role to be assumed.
+ RoleARN string + + // Session name, if you wish to reuse the credentials elsewhere. + RoleSessionName string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + SerialNumber *string + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // If SerialNumber is set and neither TokenCode nor TokenProvider are also + // set an error will be returned. + TokenCode *string + + // Async method of providing MFA token code for assuming an IAM role with MFA. + // The value returned by the function will be used as the TokenCode in the Retrieve + // call. See StdinTokenProvider for a provider that prompts and reads from stdin. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed when SerialNumber is also set and + // TokenCode is not set. + // + // If both TokenCode and TokenProvider is set, TokenProvider will be used and + // TokenCode is ignored. + TokenProvider func() (string, error) + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes a Config provider to create the STS client. The ConfigProvider is +// satisfied by the session.Session type. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. 
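+//
+// Illustrative sketch (the role ARN and session name are placeholders):
+//
+//     sess := session.Must(session.NewSession())
+//     creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/demo",
+//         func(p *stscreds.AssumeRoleProvider) {
+//             p.Duration = 30 * time.Minute
+//             p.RoleSessionName = "my-session"
+//         })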
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: sts.New(c), + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } + } + + roleOutput, err := p.Client.AssumeRole(input) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. + p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 00000000..07afe3b8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,163 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly, but session.Session +// instead. 
This package is useful when you need to reset the defaults +// of a session or service client to the SDK defaults before setting +// additional parameters. +package defaults + +import ( + "fmt" + "net/http" + "net/url" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Defaults provides a collection of default values for SDK clients. +type Defaults struct { + Config *aws.Config + Handlers request.Handlers +} + +// Get returns the SDK's default values with Config and handlers pre-configured. +func Get() Defaults { + cfg := Config() + handlers := Handlers() + cfg.Credentials = CredChain(cfg, handlers) + + return Defaults{ + Config: cfg, + Handlers: handlers, + } +} + +// Config returns the default configuration without credentials. +// To retrieve a config with credentials also included use +// `defaults.Get().Config` instead. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the configuration of an +// existing service client or session. +func Config() *aws.Config { + return aws.NewConfig(). + WithCredentials(credentials.AnonymousCredentials). + WithRegion(os.Getenv("AWS_REGION")). + WithHTTPClient(http.DefaultClient). + WithMaxRetries(aws.UseServiceDefaultRetries). + WithLogger(aws.NewDefaultLogger()). + WithLogLevel(aws.LogOff). + WithEndpointResolver(endpoints.DefaultResolver()) +} + +// Handlers returns the default request handlers. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the request handlers of an +// existing service client or session. +func Handlers() request.Handlers { + var handlers request.Handlers + + handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Validate.AfterEachFn = request.HandlerListStopOnError + handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.AfterEachFn = request.HandlerListStopOnError + handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) + handlers.Send.PushBackNamed(corehandlers.SendHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) + + return handlers +} + +// CredChain returns the default credential chain. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the credentials of an +// existing service client or session's Config. 
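+//
+// Sketch of rebuilding a config's credential chain (editorial example):
+//
+//     def := defaults.Get()
+//     def.Config.Credentials = defaults.CredChain(def.Config, def.Handlers)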
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+	return credentials.NewCredentials(&credentials.ChainProvider{
+		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+		Providers: []credentials.Provider{
+			&credentials.EnvProvider{},
+			&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+			RemoteCredProvider(*cfg, handlers),
+		},
+	})
+}
+
+const (
+	httpProviderEnvVar     = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+	ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+)
+
+// RemoteCredProvider returns a credentials provider for the default remote
+// endpoints such as EC2 or ECS Roles.
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+	if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
+		return localHTTPCredProvider(cfg, handlers, u)
+	}
+
+	if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 {
+		u := fmt.Sprintf("http://169.254.170.2%s", uri)
+		return httpCredProvider(cfg, handlers, u)
+	}
+
+	return ec2RoleProvider(cfg, handlers)
+}
+
+func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+	var errMsg string
+
+	parsed, err := url.Parse(u)
+	if err != nil {
+		errMsg = fmt.Sprintf("invalid URL, %v", err)
+	} else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") {
+		errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host)
+	}
+
+	if len(errMsg) > 0 {
+		if cfg.Logger != nil {
+			cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
+		}
+		return credentials.ErrorProvider{
+			Err:          awserr.New("CredentialsEndpointError", errMsg, err),
+			ProviderName: endpointcreds.ProviderName,
+		}
+	}
+
+	return httpCredProvider(cfg, handlers, u)
+}
+
+func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+	return endpointcreds.NewProviderClient(cfg, handlers, u,
+		func(p *endpointcreds.Provider) {
+			p.ExpiryWindow = 5 * time.Minute
+		},
+	)
+}
+
+func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+	resolver := cfg.EndpointResolver
+	if resolver == nil {
+		resolver = endpoints.DefaultResolver()
+	}
+
+	e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
+	return &ec2rolecreds.EC2RoleProvider{
+		Client:       ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
+		ExpiryWindow: 5 * time.Minute,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 00000000..984407a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,162 @@
+package ec2metadata
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
+// error if the request failed.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+	op := &request.Operation{
+		Name:       "GetMetadata",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "meta-data", p),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+
+	return output.Content, req.Send()
+}
+
+// GetUserData returns the userdata that was configured for the service.
If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserData() (string, error) { + op := &request.Operation{ + Name: "GetUserData", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "user-data"), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.Handlers.UnmarshalError.PushBack(func(r *request.Request) { + if r.HTTPResponse.StatusCode == http.StatusNotFound { + r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) + } + }) + + return output.Content, req.Send() +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + return output.Content, req.Send() +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicData("instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("SerializationError", + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + resp, err := c.GetMetadata("iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New("SerializationError", + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + resp, err := c.GetMetadata("placement/availability-zone") + if err != nil { + return "", err + } + + // returns region without the suffix. Eg: us-west-2a becomes us-west-2 + return resp[:len(resp)-1], nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. 
+func (c *EC2Metadata) Available() bool {
+	if _, err := c.GetMetadata("instance-id"); err != nil {
+		return false
+	}
+
+	return true
+}
+
+// An EC2IAMInfo provides the shape for unmarshaling
+// an IAM info from the metadata API
+type EC2IAMInfo struct {
+	Code               string
+	LastUpdated        time.Time
+	InstanceProfileArn string
+	InstanceProfileID  string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+	DevpayProductCodes []string  `json:"devpayProductCodes"`
+	AvailabilityZone   string    `json:"availabilityZone"`
+	PrivateIP          string    `json:"privateIp"`
+	Version            string    `json:"version"`
+	Region             string    `json:"region"`
+	InstanceID         string    `json:"instanceId"`
+	BillingProducts    []string  `json:"billingProducts"`
+	InstanceType       string    `json:"instanceType"`
+	AccountID          string    `json:"accountId"`
+	PendingTime        time.Time `json:"pendingTime"`
+	ImageID            string    `json:"imageId"`
+	KernelID           string    `json:"kernelId"`
+	RamdiskID          string    `json:"ramdiskId"`
+	Architecture       string    `json:"architecture"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 00000000..5b4379db
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,124 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+package ec2metadata
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+
+// An EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+	*client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+//     // Create an EC2Metadata client from just a session.
+//     svc := ec2metadata.New(mySession)
+//
+//     // Create an EC2Metadata client with additional configuration
+//     svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugWithHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
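+
+// A minimal usage sketch (illustrative only; mySession is assumed to be a
+// previously created session value):
+//
+//     svc := ec2metadata.New(mySession)
+//     if svc.Available() {
+//         id, err := svc.GetMetadata("instance-id")
+//         if err == nil {
+//             fmt.Println("running on instance:", id)
+//         }
+//     }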
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If an unmodified HTTP client from the stdlib default, or no client at all,
+// is provided, the EC2Metadata client's HTTP timeout will be shortened. To
+// disable this override set Config.EC2MetadataDisableTimeoutOverride to true.
+// The override is enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+		// If the http client is unmodified and this feature is not disabled
+		// set custom timeouts for EC2Metadata requests.
+		cfg.HTTPClient = &http.Client{
+			// use a shorter timeout than default because the metadata
+			// service is local if it is running, and to fail faster
+			// if not running on an ec2 instance.
+			Timeout: 5 * time.Second,
+		}
+	}
+
+	svc := &EC2Metadata{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: ServiceName,
+				Endpoint:    endpoint,
+				APIVersion:  "latest",
+			},
+			handlers,
+		),
+	}
+
+	svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+	svc.Handlers.Validate.Clear()
+	svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	// Add additional options to the service config
+	for _, option := range opts {
+		option(svc.Client)
+	}
+
+	return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+	return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+	Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
+		return
+	}
+
+	if data, ok := r.Data.(*metadataOutput); ok {
+		data.Content = b.String()
+	}
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
+		return
+	}
+
+	// Response body format is not consistent between metadata endpoints.
+	// Grab the error message as a string and include that as the source error
+	r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+	if r.ClientInfo.Endpoint == "" {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
new file mode 100644
index 00000000..57663616
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
@@ -0,0 +1,17 @@
+package aws
+
+import "github.com/aws/aws-sdk-go/aws/awserr"
+
+var (
+	// ErrMissingRegion is an error that is returned if region configuration is
+	// not found.
+	//
+	// @readonly
+	ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+	// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+	// resolved for a service.
+	//
+	// @readonly
+	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 00000000..db87188e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,112 @@
+package aws
+
+import (
+	"log"
+	"os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns the pointer to a LogLevel. Should be used to workaround
+// not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+	return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+	if l != nil {
+		return *l
+	}
+	return LogOff
+}
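+
+// A short sketch of how the nil-safe helpers above compose (illustrative only):
+//
+//     cfg := aws.NewConfig().WithLogLevel(aws.LogDebug)
+//     lvl := cfg.LogLevel.Value() // LogDebug
+//
+//     var unset *aws.LogLevelType
+//     _ = unset.Value() // LogOff, safe to call on a nil *LogLevelType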
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+	c := l.Value()
+	return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+	c := l.Value()
+	return c >= v
+}
+
+const (
+	// LogOff states that no logging should be performed by the SDK. This is the
+	// default state of the SDK, and should be used to disable all logging.
+	LogOff LogLevelType = iota * 0x1000
+
+	// LogDebug states that debug output should be logged by the SDK. This should
+	// be used to inspect requests made and responses received.
+	LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+	// LogDebugWithSigning states that the SDK should log request signing and
+	// presigning events. This should be used to log the signing details of
+	// requests for debugging. Will also enable LogDebug.
+	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
+	// HTTP bodies in addition to the headers and path. This should be used to
+	// see the body content of requests and responses made while using the SDK.
+	// Will also enable LogDebug.
+	LogDebugWithHTTPBody
+
+	// LogDebugWithRequestRetries states the SDK should log when service requests will
+	// be retried. This should be used when you want to log when service
+	// requests are being retried. Will also enable LogDebug.
+	LogDebugWithRequestRetries
+
+	// LogDebugWithRequestErrors states the SDK should log when service requests fail
+	// to build, send, validate, or unmarshal.
+	LogDebugWithRequestErrors
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+	Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to convert a function taking a variadic
+// list of arguments and wrap it so the Logger interface can be used.
+//
+// Example:
+//     s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+//         fmt.Fprintln(os.Stdout, args...)
+//     })})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided
+func (f LoggerFunc) Log(args ...interface{}) {
+	f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout, and
+// use same formatting runes as the stdlib log.Logger
+func NewDefaultLogger() Logger {
+	return &defaultLogger{
+		logger: log.New(os.Stdout, "", log.LstdFlags),
+	}
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+	logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+	l.logger.Println(args...)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 00000000..6c14336f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,225 @@
+package request
+
+import (
+	"fmt"
+	"strings"
+)
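+
+// A hedged sketch of how external code typically attaches a handler to one of
+// the lists defined below (svc and the handler name are illustrative):
+//
+//     svc.Handlers.Send.PushFrontNamed(request.NamedHandler{
+//         Name: "example.RequestLogger",
+//         Fn: func(r *request.Request) {
+//             fmt.Println("sending:", r.Operation.Name)
+//         },
+//     })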
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+	Validate         HandlerList
+	Build            HandlerList
+	Sign             HandlerList
+	Send             HandlerList
+	ValidateResponse HandlerList
+	Unmarshal        HandlerList
+	UnmarshalMeta    HandlerList
+	UnmarshalError   HandlerList
+	Retry            HandlerList
+	AfterRetry       HandlerList
+	Complete         HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers {
+	return Handlers{
+		Validate:         h.Validate.copy(),
+		Build:            h.Build.copy(),
+		Sign:             h.Sign.copy(),
+		Send:             h.Send.copy(),
+		ValidateResponse: h.ValidateResponse.copy(),
+		Unmarshal:        h.Unmarshal.copy(),
+		UnmarshalError:   h.UnmarshalError.copy(),
+		UnmarshalMeta:    h.UnmarshalMeta.copy(),
+		Retry:            h.Retry.copy(),
+		AfterRetry:       h.AfterRetry.copy(),
+		Complete:         h.Complete.copy(),
+	}
+}
+
+// Clear removes callback functions for all handlers
+func (h *Handlers) Clear() {
+	h.Validate.Clear()
+	h.Build.Clear()
+	h.Send.Clear()
+	h.Sign.Clear()
+	h.Unmarshal.Clear()
+	h.UnmarshalMeta.Clear()
+	h.UnmarshalError.Clear()
+	h.ValidateResponse.Clear()
+	h.Retry.Clear()
+	h.AfterRetry.Clear()
+	h.Complete.Clear()
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+	Index   int
+	Handler NamedHandler
+	Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+	list []NamedHandler
+
+	// Called after each request handler in the list is called. If set
+	// and the func returns true the HandlerList will continue to iterate
+	// over the request handlers. If false is returned the HandlerList
+	// will stop iterating.
+	//
+	// Should be used if extra logic is to be performed between each handler
+	// in the list. This can be used to terminate a list's iteration
+	// based on a condition, such as an error, like HandlerListStopOnError,
+	// or for logging, like HandlerListLogItem.
+	AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+	Name string
+	Fn   func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+	n := HandlerList{
+		AfterEachFn: l.AfterEachFn,
+	}
+	if len(l.list) == 0 {
+		return n
+	}
+
+	n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
+	return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+	l.list = l.list[0:0]
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+	return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+	l.PushBackNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushBackNamed pushes named handler n to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+	if cap(l.list) == 0 {
+		l.list = make([]NamedHandler, 0, 5)
+	}
+	l.list = append(l.list, n)
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+	l.PushFrontNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushFrontNamed pushes named handler n to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+	if cap(l.list) == len(l.list) {
+		// Allocating new list required
+		l.list = append([]NamedHandler{n}, l.list...)
+	} else {
+		// Enough room to prepend into list.
+ l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. +func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- + } + } +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. 
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 00000000..79f79602 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 00000000..02f07f4a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,58 @@ +package request + +import ( + "io" + "sync" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.Mutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { + reader := &offsetReader{} + buf.Seek(offset, 0) + + reader.buf = buf + return reader +} + +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { + o.Close() + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 00000000..4f4f1123 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,575 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "net" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is recieved + // during body reads. + ErrCodeResponseTimeout = "ResponseTimeout" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" +) + +// A Request is the service request to be made. 
+type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + context aws.Context + + built bool + + // Need to persist an intermediate body between the input Body and HTTP + // request body because the HTTP Client's transport can maintain a reference + // to the HTTP request's body after the client has returned. This value is + // safe to use concurrently and wrap the input Body for each HTTP request. + safeBody *offsetReader +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator + + BeforePresignFn func(r *Request) error +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + + httpReq, _ := http.NewRequest(method, "", nil) + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) + if err != nil { + httpReq.URL = &url.URL{} + err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) + } + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: err, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// A Option is a functional option that can augment or modify a request when +// using a WithContext API operation method. +type Option func(*Request) + +// WithGetResponseHeader builds a request Option which will retrieve a single +// header value from the HTTP Response. If there are multiple values for the +// header key use WithGetResponseHeaders instead to access the http.Header +// map directly. The passed in val pointer must be non-nil. +// +// This Option can be used multiple times with a single API operation. +// +// var id2, versionID string +// svc.PutObjectWithContext(ctx, params, +// request.WithGetResponseHeader("x-amz-id-2", &id2), +// request.WithGetResponseHeader("x-amz-version-id", &versionID), +// ) +func WithGetResponseHeader(key string, val *string) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *val = req.HTTPResponse.Header.Get(key) + }) + } +} + +// WithGetResponseHeaders builds a request Option which will retrieve the +// headers from the HTTP response and assign them to the passed in headers +// variable. The passed in headers pointer must be non-nil. 
+//
+//	var headers http.Header
+//	svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*headers = req.HTTPResponse.Header
+		})
+	}
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//	svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+	return func(r *Request) {
+		r.Config.LogLevel = aws.LogLevel(l)
+	}
+}
+
+// ApplyOptions will apply each option to the request calling them in the order
+// they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+	for _, opt := range opts {
+		opt(r)
+	}
+}
+
+// Context will always return a non-nil context. If Request does not have a
+// context aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+	if r.context != nil {
+		return r.context
+	}
+	return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+	if ctx == nil {
+		panic("context cannot be nil")
+	}
+	setRequestContext(r, ctx)
+}
+
+// WillRetry returns whether the request can be retried.
+func (r *Request) WillRetry() bool {
+	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or invalid.
+func (r *Request) ParamsFilled() bool {
+	return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+	return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+	r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+	r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+	r.Body = reader
+	r.ResetBody()
+}
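+
+// A brief sketch of the body helpers above (illustrative; req is a *Request
+// obtained elsewhere and payload is a caller-supplied byte slice):
+//
+//	req.SetStringBody(`{"key":"value"}`)        // string-backed body
+//	req.SetBufferBody([]byte{1, 2, 3})          // byte-slice-backed body
+//	req.SetReaderBody(bytes.NewReader(payload)) // any io.ReadSeeker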
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails.
+func (r *Request) Presign(expireTime time.Duration) (string, error) {
+	r.ExpireTime = expireTime
+	r.NotHoist = false
+
+	if r.Operation.BeforePresignFn != nil {
+		r = r.copy()
+		err := r.Operation.BeforePresignFn(r)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	r.Sign()
+	if r.Error != nil {
+		return "", r.Error
+	}
+	return r.HTTPRequest.URL.String(), nil
+}
+
+// PresignRequest behaves just like Presign, but hoists all headers and signs them.
+// Also returns the signed hash back to the user
+func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
+	r.ExpireTime = expireTime
+	r.NotHoist = true
+	r.Sign()
+	if r.Error != nil {
+		return "", nil, r.Error
+	}
+	return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+	if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+		return
+	}
+
+	retryStr := "not retrying"
+	if retrying {
+		retryStr = "will retry"
+	}
+
+	r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+		stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+	if !r.built {
+		r.Handlers.Validate.Run(r)
+		if r.Error != nil {
+			debugLogReqError(r, "Validate Request", false, r.Error)
+			return r.Error
+		}
+		r.Handlers.Build.Run(r)
+		if r.Error != nil {
+			debugLogReqError(r, "Build Request", false, r.Error)
+			return r.Error
+		}
+		r.built = true
+	}
+
+	return r.Error
+}
+
+// Sign will sign the request returning error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+	r.Build()
+	if r.Error != nil {
+		debugLogReqError(r, "Build Request", false, r.Error)
+		return r.Error
+	}
+
+	r.Handlers.Sign.Run(r)
+	return r.Error
+}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+func (r *Request) ResetBody() {
+	if r.safeBody != nil {
+		r.safeBody.Close()
+	}
+
+	r.safeBody = newOffsetReader(r.Body, r.BodyStart)
+
+	// Go 1.8 tightened and clarified the rules code needs to use when building
+	// requests with the http package. Go 1.8 removed the automatic detection
+	// of if the Request.Body was empty, or actually had bytes in it. The SDK
+	// always sets the Request.Body even if it is empty and should not actually
+	// be sent. This is incorrect.
+	//
+	// Go 1.8 did add a http.NoBody value that the SDK can use to tell the http
+	// client that the request really should be sent without a body. The
+	// Request.Body cannot be set to nil, which is preferable, because the
+	// field is exported and could introduce nil pointer dereferences for users
+	// of the SDK if they used that field.
+ // + // Related golang/go#18257 + l, err := computeBodyLength(r.Body) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, "failed to compute request body size", err) + return + } + + if l == 0 { + r.HTTPRequest.Body = noBodyReader + } else if l > 0 { + r.HTTPRequest.Body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + r.HTTPRequest.Body = noBodyReader + default: + r.HTTPRequest.Body = r.safeBody + } + } +} + +// Attempts to compute the length of the body of the reader using the +// io.Seeker interface. If the value is not seekable because of being +// a ReaderSeekerCloser without an unerlying Seeker -1 will be returned. +// If no error occurs the length of the body will be returned. +func computeBodyLength(r io.ReadSeeker) (int64, error) { + seekable := true + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. + switch v := r.(type) { + case aws.ReaderSeekerCloser: + seekable = v.IsSeeker() + case *aws.ReaderSeekerCloser: + seekable = v.IsSeeker() + } + if !seekable { + return -1, nil + } + + curOffset, err := r.Seek(0, 1) + if err != nil { + return 0, err + } + + endOffset, err := r.Seek(0, 2) + if err != nil { + return 0, err + } + + _, err = r.Seek(curOffset, 0) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. + r.Handlers.Complete.Run(r) + }() + + for { + if aws.BoolValue(r.Retryable) { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + + // Closing response body to ensure that no response body is leaked + // between retry attempts. 
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + } + + r.Sign() + if r.Error != nil { + return r.Error + } + + r.Retryable = nil + + r.Handlers.Send.Run(r) + if r.Error != nil { + if !shouldRetryCancel(r) { + return r.Error + } + + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", false, r.Error) + return r.Error + } + debugLogReqError(r, "Send Request", true, err) + continue + } + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.UnmarshalError.Run(r) + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Validate Response", true, err) + continue + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Unmarshal Response", true, err) + continue + } + + break + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} + +func shouldRetryCancel(r *Request) bool { + awsErr, ok := r.Error.(awserr.Error) + timeoutErr := false + errStr := r.Error.Error() + if ok { + if awsErr.Code() == CanceledErrorCode { + return false + } + err := awsErr.OrigErr() + netErr, netOK := err.(net.Error) + timeoutErr = netOK && netErr.Temporary() + if urlErr, ok := err.(*url.Error); !timeoutErr && ok { + errStr = urlErr.Err.Error() + } + } + + // There can be two types of canceled errors here. + // The first being a net.Error and the other being an error. + // If the request was timed out, we want to continue the retry + // process. Otherwise, return the canceled error. + return timeoutErr || + (errStr != "net/http: request canceled" && + errStr != "net/http: request canceled while waiting for connection") + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 00000000..59de6736 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,236 @@ +package request + +import ( + "reflect" + "sync/atomic" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. 
+// +// cont := true +// for p.Next() && cont { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// } +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) + + started bool + nextTokens []interface{} + + err error + curPage interface{} +} + +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + return !(p.started && len(p.nextTokens) == 0) +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err +} + +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. +func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. 
+func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if v == false { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + v, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(v) > 0 { + tokens = append(tokens, v[0]) + tokenAdded = true + } else { + tokens = append(tokens, nil) + } + } + if !tokenAdded { + return nil + } + + return tokens +} + +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") + + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. 
Use Pagination type for configurable pagination of API operations") + + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 00000000..7af81de2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,154 @@ +package request + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the service.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. +type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once + "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: struct{}{}, + ErrCodeRead: struct{}{}, +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + return isErrConnectionReset(err) +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if error is nil. +func IsErrorRetryable(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) + } + } + return false +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if error is nil. 
+func IsErrorThrottle(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeThrottle(aerr.Code()) + } + } + return false +} + +// IsErrorExpiredCreds returns whether the error code is a credential expiry error. +// Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeExpiredCreds(aerr.Code()) + } + } + return false +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + return IsErrorThrottle(r.Error) +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go new file mode 100644 index 00000000..2520286b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -0,0 +1,234 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. 
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+	for _, err := range nested.errs {
+		err.SetContext(e.Context)
+		err.AddNestedContext(nestedCtx)
+		e.errs = append(e.errs, err)
+	}
+}
+
+// Len returns the number of invalid parameter errors
+func (e ErrInvalidParams) Len() int {
+	return len(e.errs)
+}
+
+// Code returns the code of the error
+func (e ErrInvalidParams) Code() string {
+	return InvalidParameterErrCode
+}
+
+// Message returns the message of the error
+func (e ErrInvalidParams) Message() string {
+	return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e ErrInvalidParams) Error() string {
+	w := &bytes.Buffer{}
+	fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+	for _, err := range e.errs {
+		fmt.Fprintf(w, "- %s\n", err.Message())
+	}
+
+	return w.String()
+}
+
+// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+	return awserr.NewBatchError(
+		InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+	errs := make([]error, len(e.errs))
+	for i := 0; i < len(errs); i++ {
+		errs[i] = e.errs[i]
+	}
+
+	return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+	awserr.Error
+
+	// Field name the error occurred on.
+	Field() string
+
+	// SetContext updates the context of the error.
+	SetContext(string)
+
+	// AddNestedContext updates the error's context to include a nested level.
+	AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+	context       string
+	nestedContext string
+	field         string
+	code          string
+	msg           string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+	return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+	return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+	return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil. Implemented for the awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+	return nil
+}
+
+// Field returns the field and context in which the error occurred.
+func (e *errInvalidParam) Field() string {
+	field := e.context
+	if len(field) > 0 {
+		field += "."
+	}
+	if len(e.nestedContext) > 0 {
+		field += fmt.Sprintf("%s.", e.nestedContext)
+	}
+	field += e.field
+
+	return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+	e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+	if len(e.nestedContext) == 0 {
+		e.nestedContext = ctx
+	} else {
+		e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+	}
+}
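+
+// A sketch of how generated validators typically accumulate these errors
+// (the input shape name is illustrative):
+//
+//	invalid := request.ErrInvalidParams{Context: "ExampleInput"}
+//	invalid.Add(request.NewErrParamRequired("Bucket"))
+//	invalid.Add(request.NewErrParamMinLen("Key", 1))
+//	if invalid.Len() > 0 {
+//		return invalid // satisfies the awserr.Error interface
+//	}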
+func NewErrParamRequired(field string) *ErrParamRequired { + return &ErrParamRequired{ + errInvalidParam{ + code: ParamRequiredErrCode, + field: field, + msg: "missing required field", + }, + } +} + +// An ErrParamMinValue represents a minimum value parameter error. +type ErrParamMinValue struct { + errInvalidParam + min float64 +} + +// NewErrParamMinValue creates a new minimum value parameter error. +func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { + return &ErrParamMinValue{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field value of %v", min), + }, + min: min, + } +} + +// MinValue returns the field's required minimum value. +// +// float64 is returned for both int and float min values. +func (e *ErrParamMinValue) MinValue() float64 { + return e.min +} + +// An ErrParamMinLen represents a minimum length parameter error. +type ErrParamMinLen struct { + errInvalidParam + min int +} + +// NewErrParamMinLen creates a new minimum length parameter error. +func NewErrParamMinLen(field string, min int) *ErrParamMinLen { + return &ErrParamMinLen{ + errInvalidParam: errInvalidParam{ + code: ParamMinLenErrCode, + field: field, + msg: fmt.Sprintf("minimum field size of %v", min), + }, + min: min, + } +} + +// MinLen returns the field's required minimum length. +func (e *ErrParamMinLen) MinLen() int { + return e.min +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go new file mode 100644 index 00000000..ea7b886f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -0,0 +1,273 @@ +/* +Package session provides configuration for the SDK's service clients. + +Sessions can be shared across all service clients that share the same base +configuration. The Session is built from the SDK's default configuration and +request handlers. + +Sessions should be cached when possible, because creating a new Session will +load all configuration values from the environment and config files each time +the Session is created. Sharing the Session value across all of your service +clients will ensure the configuration is loaded the fewest number of times possible. + +Concurrency + +Sessions are safe to use concurrently as long as the Session is not being +modified. The SDK will not modify the Session once the Session has been created. +Creating service clients concurrently from a shared Session is safe. + +Sessions from Shared Config + +Sessions can be created using the method above that will only load the +additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. +Alternatively you can explicitly create a Session with shared config enabled. +To do this you can use NewSessionWithOptions to configure how the Session will +be created. Using NewSessionWithOptions with SharedConfigState set to +SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG +environment variable was set. + +Creating Sessions + +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded config values the Session is being created +with. This allows you to provide additional, or case-based, configuration +as needed. + +By default NewSession will only load credentials from the shared credentials +file (~/.aws/credentials).
If the AWS_SDK_LOAD_CONFIG environment variable is +set to a truthy value, the Session will be created from the configuration +values from the shared config (~/.aws/config) and shared credentials +(~/.aws/credentials) files. See the section Sessions from Shared Config for +more information. + +Create a Session with the default config and request handlers, with credentials, +region, and profile loaded from the environment and shared config automatically. +Requires the AWS_PROFILE to be set, or "default" is used. + + // Create Session + sess := session.Must(session.NewSession()) + + // Create a Session with a custom region + sess := session.Must(session.NewSession(&aws.Config{ + Region: aws.String("us-east-1"), + })) + + // Create an S3 client instance from a session + sess := session.Must(session.NewSession()) + + svc := s3.New(sess) + +Create Session With Option Overrides + +In addition to NewSession, Sessions can be created using NewSessionWithOptions. +This func allows you to control and override how the Session will be created +through code instead of being driven by environment variables only. + +Use NewSessionWithOptions when you want to provide the config profile, or +override the shared config state (AWS_SDK_LOAD_CONFIG). + + // Equivalent to session.NewSession() + sess := session.Must(session.NewSessionWithOptions(session.Options{ + // Options + })) + + // Specify profile to load for the session's config + sess := session.Must(session.NewSessionWithOptions(session.Options{ + Profile: "profile_name", + })) + + // Specify profile for config and region for requests + sess := session.Must(session.NewSessionWithOptions(session.Options{ + Config: aws.Config{Region: aws.String("us-east-1")}, + Profile: "profile_name", + })) + + // Force enable Shared Config support + sess := session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + })) + +Adding Handlers + +You can add handlers to a session for processing HTTP requests. All service +clients that use the session inherit the handlers. For example, the following +handler logs every request and its payload made by a service client: + + // Create a session, and add additional handlers for all service + // clients created with the Session to inherit. Adds logging handler. + sess := session.Must(session.NewSession()) + + sess.Handlers.Send.PushFront(func(r *request.Request) { + // Log every request made and its payload + logger.Printf("Request: %s/%s, Payload: %s", + r.ClientInfo.ServiceName, r.Operation, r.Params) + }) + +Deprecated "New" function + +The New session function has been deprecated because it does not provide a good +way to return errors that occur when loading the configuration files and values. +Because of this, NewSession was created so errors can be retrieved when +creating a session fails. + +Shared Config Fields + +By default the SDK will only load the shared credentials file's (~/.aws/credentials) +credentials values, and all other config is provided by the environment variables, +SDK defaults, and user-provided aws.Config values. + +If the AWS_SDK_LOAD_CONFIG environment variable is set, or the SharedConfigEnable +option is used to create the Session, the full shared config values will be +loaded. This includes credentials, region, and support for assume role. In +addition the Session will load its configuration from both the shared config +file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both +files have the same format.
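For reference, a minimal pair of shared files using this format might look like the following sketch (the profile and role names are illustrative, not defaults of the SDK). Note that, per the lookup logic in shared_config.go below, non-default sections in the config file use a "profile " prefix while the credentials file does not:

	# ~/.aws/credentials
	[default]
	aws_access_key_id = AKID
	aws_secret_access_key = SECRET

	# ~/.aws/config
	[profile assume_role_profile]
	role_arn = arn:aws:iam::123456789012:role/example-role
	source_profile = default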
+ +If both config files are present the configuration from both files will be +read. The Session will be created from configuration values from the shared +credentials file (~/.aws/credentials), which take precedence over those in the shared config file (~/.aws/config). + +Credentials are the values the SDK should use for authenticating requests with +AWS Services. When loaded from a configuration file, both +aws_access_key_id and aws_secret_access_key must be provided together in the +same file to be considered valid. The values will be ignored if not a complete +group. aws_session_token is an optional field that can be provided if both of +the other two fields are also provided. + + aws_access_key_id = AKID + aws_secret_access_key = SECRET + aws_session_token = TOKEN + +Assume Role values allow you to configure the SDK to assume an IAM role using +a set of credentials provided in a config file via the source_profile field. +Both "role_arn" and "source_profile" are required. The SDK supports assuming +a role with an MFA token if the session option AssumeRoleTokenProvider +is set. + + role_arn = arn:aws:iam::<account_number>:role/<role_name> + source_profile = profile_with_creds + external_id = 1234 + mfa_serial = <serial or mfa arn> + role_session_name = session_name + +Region is the region the SDK should use for looking up AWS service endpoints +and signing requests. + + region = us-east-1 + +Assume Role with MFA token + +To create a session with support for assuming an IAM role with MFA, set the +session option AssumeRoleTokenProvider to a function that will prompt for the +MFA token code when the SDK assumes the role and refreshes the role's credentials. +This allows you to configure the SDK via the shared config to assume a role +with MFA tokens. + +In order for the SDK to assume a role with MFA, the SharedConfigState +session option must be set to SharedConfigEnable, or the AWS_SDK_LOAD_CONFIG +environment variable must be set. + +The shared configuration instructs the SDK to assume an IAM role with MFA +when the mfa_serial configuration field is set in the shared config +(~/.aws/config) or shared credentials (~/.aws/credentials) file. + +If mfa_serial is set in the configuration but the AssumeRoleTokenProvider +session option is not set, an error will +be returned when creating the session. + + sess := session.Must(session.NewSessionWithOptions(session.Options{ + AssumeRoleTokenProvider: stscreds.StdinTokenProvider, + })) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess) + +To set up assume role outside of a session, see the stscreds.AssumeRoleProvider +documentation. + +Environment Variables + +When a Session is created several environment variables can be set to adjust +how the SDK functions, and what configuration data it loads when creating +Sessions. All environment values are optional, but some values like credentials +require multiple of the values to be set or the partial values will be ignored. +All environment variable values are strings unless otherwise noted. + +Environment configuration values. If set, both Access Key ID and Secret Access +Key must be provided. A Session Token can optionally also be provided, but is +not required. + + # Access Key ID + AWS_ACCESS_KEY_ID=AKID + AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + + # Secret Access Key + AWS_SECRET_ACCESS_KEY=SECRET + AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+ + # Session Token + AWS_SESSION_TOKEN=TOKEN + +Region value will instruct the SDK where to make service API requests to. If it is +not provided in the environment, the region must be provided before a service +client request is made. + + AWS_REGION=us-east-1 + + # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_REGION is not also set. + AWS_DEFAULT_REGION=us-east-1 + +Profile name the SDK should use when loading shared config from the +configuration files. If not provided "default" will be used as the profile name. + + AWS_PROFILE=my_profile + + # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_PROFILE is not also set. + AWS_DEFAULT_PROFILE=my_profile + +SDK load config instructs the SDK to load the shared config in addition to +shared credentials. This also expands the configuration loaded so the shared +credentials will have parity with the shared config file. This also enables +Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE +env values as well. + + AWS_SDK_LOAD_CONFIG=1 + +Shared credentials file path can be set to instruct the SDK to use an alternative +file for the shared credentials. If not set, the file will be loaded from +$HOME/.aws/credentials on Linux/Unix based systems, and +%USERPROFILE%\.aws\credentials on Windows. + + AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + +Shared config file path can be set to instruct the SDK to use an alternative +file for the shared config. If not set, the file will be loaded from +$HOME/.aws/config on Linux/Unix based systems, and +%USERPROFILE%\.aws\config on Windows. + + AWS_CONFIG_FILE=$HOME/my_shared_config + +Path to a custom Certificate Authority (CA) bundle PEM file that the SDK +will use instead of the default system's root CA bundle. Use this only +if you want to replace the CA bundle the SDK uses for TLS requests. + + AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + +Enabling this option will attempt to merge the Transport into the SDK's HTTP +client. If the client's Transport is not an http.Transport, an error will be +returned. If the Transport's TLS config is set, this option will cause the SDK +to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file +contains multiple certificates, all of them will be loaded. + +The Session option CustomCABundle is also available when creating sessions +to also enable this feature. The CustomCABundle session option field has priority +over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + +Setting a custom HTTPClient in the aws.Config options will override this setting. +To use this option and a custom HTTP client, the HTTP client needs to be provided +when creating the session, not the service client. +*/ +package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go new file mode 100644 index 00000000..e6278a78 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -0,0 +1,208 @@ +package session + +import ( + "os" + "path/filepath" + "strconv" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +// envConfig is a collection of environment values the SDK will read its +// setup config from. All environment values are optional, but some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type envConfig struct { + // Environment configuration values.
If set, both Access Key ID and Secret Access + // Key must be provided. A Session Token can optionally also be provided, but is + // not required. + // + // # Access Key ID + // AWS_ACCESS_KEY_ID=AKID + // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + // + // # Secret Access Key + // AWS_SECRET_ACCESS_KEY=SECRET + // AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + // + // # Session Token + // AWS_SESSION_TOKEN=TOKEN + Creds credentials.Value + + // Region value will instruct the SDK where to make service API requests to. If it is + // not provided in the environment, the region must be provided before a service + // client request is made. + // + // AWS_REGION=us-east-1 + // + // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_REGION is not also set. + // AWS_DEFAULT_REGION=us-east-1 + Region string + + // Profile name the SDK should use when loading shared configuration from the + // shared configuration files. If not provided "default" will be used as the + // profile name. + // + // AWS_PROFILE=my_profile + // + // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_PROFILE is not also set. + // AWS_DEFAULT_PROFILE=my_profile + Profile string + + // SDK load config instructs the SDK to load the shared config in addition to + // shared credentials. This also expands the configuration loaded from the shared + // credentials to have parity with the shared config file. This also enables + // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE + // env values as well. + // + // AWS_SDK_LOAD_CONFIG=1 + EnableSharedConfig bool + + // Shared credentials file path can be set to instruct the SDK to use an alternate + // file for the shared credentials. If not set, the file will be loaded from + // $HOME/.aws/credentials on Linux/Unix based systems, and + // %USERPROFILE%\.aws\credentials on Windows. + // + // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + SharedCredentialsFile string + + // Shared config file path can be set to instruct the SDK to use an alternate + // file for the shared config. If not set, the file will be loaded from + // $HOME/.aws/config on Linux/Unix based systems, and + // %USERPROFILE%\.aws\config on Windows. + // + // AWS_CONFIG_FILE=$HOME/my_shared_config + SharedConfigFile string + + // Sets the path to a custom Certificate Authority (CA) Bundle PEM file + // that the SDK will use instead of the system's root CA bundle. + // Only use this if you want to configure the SDK to use a custom set + // of CAs. + // + // Enabling this option will attempt to merge the Transport + // into the SDK's HTTP client. If the client's Transport is + // not an http.Transport, an error will be returned. If the + // Transport's TLS config is set, this option will cause the + // SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this setting. + // To use this option and a custom HTTP client, the HTTP client needs to be provided + // when creating the session, not the service client.
+ // + // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + CustomCABundle string +} + +var ( + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. +// +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value +// the shared SDK config will be loaded in addition to the SDK's specific +// configuration values. +func loadEnvConfig() envConfig { + enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) + return envConfigLoad(enableSharedConfig) +} + +// loadSharedEnvConfig retrieves the SDK's environment configuration, and the +// SDK shared config. See `envConfig` for the values that will be retrieved. +// +// Loads the shared configuration in addition to the SDK's specific configuration. +// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` +// environment variable is set. +func loadSharedEnvConfig() envConfig { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) envConfig { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + + // Require logical grouping of credentials + if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { + cfg.Creds = credentials.Value{} + } else { + cfg.Creds.ProviderName = "EnvConfigCredentials" + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + cfg.SharedCredentialsFile = sharedCredentialsFilename() + cfg.SharedConfigFile = sharedConfigFilename() + + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") + + return cfg +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + *dst = v + break + } + } +} + +func sharedCredentialsFilename() string { + if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 { + return name + } + + return filepath.Join(userHomeDir(), ".aws", "credentials") +} + +func sharedConfigFilename() string { + if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 { + return name + } + + return filepath.Join(userHomeDir(), ".aws", "config") +} + +func userHomeDir() string { + homeDir := os.Getenv("HOME") // *nix + if len(homeDir) == 0 { // windows + homeDir = os.Getenv("USERPROFILE") + } + + return homeDir +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 00000000..4792d3ae --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,590 @@ +package session + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + + "github.com/aws/aws-sdk-go/aws" +
"github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the Session concurrently. +// +// The Session satisfies the service client's client.ClientConfigProvider. +type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the handlers merging in the provided configs +// on top of the SDK's default configurations. Once the Session is created it +// can be mutated to modify the Config or Handlers. The Session is safe to be +// read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New +// method could now encounter an error when loading the configuration. When +// The environment variable is set, and an error occurs, New will return a +// session that will fail all requests reporting the error that occurred while +// loading the session. Use NewSession to get the error when creating the +// session. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded, in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. +// +// Deprecated: Use NewSession functions to create sessions instead. NewSession +// has the same functionality as New except an error can be returned when the +// func is called instead of waiting to receive an error until a request is made. +func New(cfgs ...*aws.Config) *Session { + // load initial config from environment + envCfg := loadEnvConfig() + + if envCfg.EnableSharedConfig { + s, err := newSession(Options{}, envCfg, cfgs...) + if err != nil { + // Old session.New expected all errors to be discovered when + // a request is made, and would report the errors then. This + // needs to be replicated if an error occurs while creating + // the session. + msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + + "Use session.NewSession to handle errors occurring during session creation." + + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s = &Session{Config: defaults.Config()} + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) + } + return s + } + + return deprecatedNewSession(cfgs...) +} + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. Once the Session is created +// it can be mutated to modify the Config or Handlers. The Session is safe to +// be read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). 
Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// See the NewSessionWithOptions func for information on how to override or +// control through code how the Session will be created, such as specifying the +// config profile and controlling whether shared config is enabled or not. +func NewSession(cfgs ...*aws.Config) (*Session, error) { + opts := Options{} + opts.Config.MergeIn(cfgs...) + + return NewSessionWithOptions(opts) +} + +// SharedConfigState provides the ability to optionally override the state +// of the session's creation based on the shared config being enabled or +// disabled. +type SharedConfigState int + +const ( + // SharedConfigStateFromEnv does not override any state of the + // AWS_SDK_LOAD_CONFIG env var. It is the default value of the + // SharedConfigState type. + SharedConfigStateFromEnv SharedConfigState = iota + + // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value + // and disables the shared config functionality. + SharedConfigDisable + + // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value + // and enables the shared config functionality. + SharedConfigEnable +) + +// Options provides the means to control how a Session is created and what +// configuration values will be loaded. +type Options struct { + // Provides config values for the SDK to use when creating service clients + // and making API requests to services. Any value set with this field + // will override the associated value provided by the SDK defaults, + // environment or config files where relevant. + // + // If not set, configuration values from SDK defaults, environment, and + // config files will be used. + Config aws.Config + + // Overrides the config profile the Session should be created from. If not + // set, the value of the environment variable will be loaded (AWS_PROFILE, + // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). + // + // If not set, and the environment variables are not set, the "default" + // (DefaultSharedConfigProfile) will be used as the profile to load the + // session config from. + Profile string + + // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG + // environment variable. By default a Session will be created using the + // value provided by the AWS_SDK_LOAD_CONFIG environment variable. + // + // Setting this value to SharedConfigEnable or SharedConfigDisable + // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable + // and enable or disable the shared config functionality. + SharedConfigState SharedConfigState + + // When the SDK's shared config is configured to assume a role with MFA + // this option is required in order to provide the mechanism that will + // retrieve the MFA token. There is no default value for this field. If + // it is not set, an error will be returned when creating the session. + // + // This token provider will be called whenever the assumed role's + // credentials need to be refreshed. Within the context of service clients + // all sharing the same session, the SDK will ensure calls to the token + // provider are atomic. When sharing a token provider across multiple + // sessions, additional synchronization logic is needed to ensure the + // token providers do not introduce race conditions. It is recommended to + // share the session where possible.
+ // + // stscreds.StdinTokenProvider is a basic implementation that will prompt + // from stdin for the MFA token code. + // + // This field is only used if the shared configuration is enabled, and + // the config enables assume role with MFA via the mfa_serial field. + AssumeRoleTokenProvider func() (string, error) + + // Reader for a custom Certificate Authority (CA) bundle in PEM format that + // the SDK will use instead of the default system's root CA bundle. Use this + // only if you want to replace the CA bundle the SDK uses for TLS requests. + // + // Enabling this option will attempt to merge the Transport into the SDK's HTTP + // client. If the client's Transport is not an http.Transport, an error will be + // returned. If the Transport's TLS config is set, this option will cause the SDK + // to overwrite the Transport's TLS config's RootCAs value. If the CA + // bundle reader contains multiple certificates, all of them will be loaded. + // + // The Session option CustomCABundle is also available when creating sessions + // to also enable this feature. The CustomCABundle session option field has priority + // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + CustomCABundle io.Reader +} + +// NewSessionWithOptions returns a new Session created from SDK defaults, config files, +// environment, and user-provided config files. This func uses the Options +// values to configure how the Session is created. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) +// +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) +// +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + if opts.SharedConfigState == SharedConfigEnable { + envCfg = loadSharedEnvConfig() + } else { + envCfg = loadEnvConfig() + } + + if len(opts.Profile) > 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + // Only use AWS_CA_BUNDLE if session option is not provided.
+ if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { + f, err := os.Open(envCfg.CustomCABundle) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to open custom CA bundle PEM file", err) + } + defer f.Close() + opts.CustomCABundle = f + } + + return newSession(opts, envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +func deprecatedNewSession(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + if cfg.EndpointResolver == nil { + // An endpoint resolver is required for a session to be able to provide + // endpoints for service client configurations. + cfg.EndpointResolver = endpoints.DefaultResolver() + } + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + return s +} + +func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Get a merged version of the user provided config to determine if + // credentials were set. + userCfg := &aws.Config{} + userCfg.MergeIn(cfgs...) + + // Order the config files will be loaded in, with later files overwriting + // previous config file values. + cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+ cfgFiles = cfgFiles[1:] + } + + // Load additional config from file(s) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) + if err != nil { + return nil, err + } + + if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { + return nil, err + } + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + // Setup HTTP client with custom cert bundle if enabled + if opts.CustomCABundle != nil { + if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { + return nil, err + } + } + + return s, nil +} + +func loadCustomCABundle(s *Session, bundle io.Reader) error { + var t *http.Transport + switch v := s.Config.HTTPClient.Transport.(type) { + case *http.Transport: + t = v + default: + if s.Config.HTTPClient.Transport != nil { + return awserr.New("LoadCustomCABundleError", + "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) + } + } + if t == nil { + t = &http.Transport{} + } + + p, err := loadCertPool(bundle) + if err != nil { + return err + } + if t.TLSClientConfig == nil { + t.TLSClientConfig = &tls.Config{} + } + t.TLSClientConfig.RootCAs = p + + s.Config.HTTPClient.Transport = t + + return nil +} + +func loadCertPool(r io.Reader) (*x509.CertPool, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to read custom CA bundle PEM file", err) + } + + p := x509.NewCertPool() + if !p.AppendCertsFromPEM(b) { + return nil, awserr.New("LoadCustomCABundleError", + "failed to load custom CA bundle PEM file", err) + } + + return p, nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { + // Merge in user provided configuration + cfg.MergeIn(userCfg) + + // Region if not already set by user + if len(aws.StringValue(cfg.Region)) == 0 { + if len(envCfg.Region) > 0 { + cfg.WithRegion(envCfg.Region) + } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { + cfg.WithRegion(sharedCfg.Region) + } + } + + // Configure credentials if not already set + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + if len(envCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + envCfg.Creds, + ) + } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { + cfgCp := *cfg + cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.AssumeRoleSource.Creds, + ) + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. 
+ return AssumeRoleTokenProviderNotSetError{} + } + cfg.Credentials = stscreds.NewCredentials( + &Session{ + Config: &cfgCp, + Handlers: handlers.Copy(), + }, + sharedCfg.AssumeRole.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName + + // Assume role with external ID + if len(sharedCfg.AssumeRole.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.AssumeRole.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ) + } else if len(sharedCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + } else { + // Fall back to the default credentials provider, including mock errors + // for the credential chain so the user can identify why credentials + // failed to be retrieved. + cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, + &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + } + + return nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a session +// if the AssumeRoleTokenProvider option is not set and the shared config is +// configured to assume a role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set." +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +var emptyCreds = credentials.Value{} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, copying the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. +// +// // Create a copy of the current Session, configured for the us-west-2 region.
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { + // Backwards compatibility, the error will be eaten if a user calls ClientConfig + // directly. All SDK services will use clientConfigWithErr. + cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) + + return cfg +} + +func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { + s = s.Copy(cfgs...) + + var resolved endpoints.ResolvedEndpoint + var err error + + region := aws.StringValue(s.Config.Region) + + if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { + resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } else { + resolved, err = s.Config.EndpointResolver.EndpointFor( + serviceName, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) + opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningName: resolved.SigningName, + }, err +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...)
+ + var resolved endpoints.ResolvedEndpoint + + region := aws.StringValue(s.Config.Region) + + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningName: resolved.SigningName, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 00000000..b58076f5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,295 @@ +package session + +import ( + "fmt" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/go-ini/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + + // Additional Config fields + regionKey = `region` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` +) + +type assumeRoleConfig struct { + RoleARN string + SourceProfile string + ExternalID string + MFASerial string + RoleSessionName string +} + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + // Credentials values from the config file. Both aws_access_key_id + // and aws_secret_access_key must be provided together in the same file + // to be considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of the + // other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + AssumeRole assumeRoleConfig + AssumeRoleSource *sharedConfig + + // Region is the region the SDK should use for looking up AWS service endpoints + // and signing requests. + // + // region + Region string +} + +type sharedConfigFile struct { + Filename string + IniData *ini.File +} + +// loadSharedConfig retrieves the configuration from the list of files +// using the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. + +// For example, given two files A and B, both defining credentials. If the order +// of the files is A then B, B's credential values will be used instead of A's. +// +// See sharedConfig.setFromIniFile for information on how the config files +// will be loaded.
+func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) { + if len(profile) == 0 { + profile = DefaultSharedConfigProfile + } + + files, err := loadSharedConfigIniFiles(filenames) + if err != nil { + return sharedConfig{}, err + } + + cfg := sharedConfig{} + if err = cfg.setFromIniFiles(profile, files); err != nil { + return sharedConfig{}, err + } + + if len(cfg.AssumeRole.SourceProfile) > 0 { + if err := cfg.setAssumeRoleSource(profile, files); err != nil { + return sharedConfig{}, err + } + } + + return cfg, nil +} + +func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { + files := make([]sharedConfigFile, 0, len(filenames)) + + for _, filename := range filenames { + b, err := ioutil.ReadFile(filename) + if err != nil { + // Skip files which can't be opened and read for whatever reason + continue + } + + f, err := ini.Load(b) + if err != nil { + return nil, SharedConfigLoadError{Filename: filename} + } + + files = append(files, sharedConfigFile{ + Filename: filename, IniData: f, + }) + } + + return files, nil +} + +func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { + var assumeRoleSrc sharedConfig + + // Multiple level assume role chains are not supported + if cfg.AssumeRole.SourceProfile == origProfile { + assumeRoleSrc = *cfg + assumeRoleSrc.AssumeRole = assumeRoleConfig{} + } else { + err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files) + if err != nil { + return err + } + } + + if len(assumeRoleSrc.Creds.AccessKeyID) == 0 { + return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN} + } + + cfg.AssumeRoleSource = &assumeRoleSrc + + return nil +} + +func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error { + // Load the profile from each file, skipping files where the profile + // does not exist. + for _, f := range files { + if err := cfg.setFromIniFile(profile, f); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore profiles that are missing from a file + continue + } + return err + } + } + + return nil +} + +// setFromIniFile loads the configuration from the file using +// the profile provided. A sharedConfig pointer type value is used so that +// multiple config file loadings can be chained. +// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config, such as credentials. For example, +// if a config file only includes aws_access_key_id but no aws_secret_access_key, +// the aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { + section, err := file.IniData.GetSection(profile) + if err != nil { + // Fall back to the alternate profile name: profile <profile name> + section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if err != nil { + return SharedConfigProfileNotExistsError{Profile: profile, Err: err} + } + } + + // Shared Credentials + akid := section.Key(accessKeyIDKey).String() + secret := section.Key(secretAccessKey).String() + if len(akid) > 0 && len(secret) > 0 { + cfg.Creds = credentials.Value{ + AccessKeyID: akid, + SecretAccessKey: secret, + SessionToken: section.Key(sessionTokenKey).String(), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + } + + // Assume Role + roleArn := section.Key(roleArnKey).String() + srcProfile := section.Key(sourceProfileKey).String() + if len(roleArn) > 0 && len(srcProfile) > 0 { + cfg.AssumeRole = assumeRoleConfig{ + RoleARN: roleArn, + SourceProfile: srcProfile, + ExternalID: section.Key(externalIDKey).String(), + MFASerial: section.Key(mfaSerialKey).String(), + RoleSessionName: section.Key(roleSessionNameKey).String(), + } + } + + // Region + if v := section.Key(regionKey).String(); len(v) > 0 { + cfg.Region = v + } + + return nil +} + +// SharedConfigLoadError is an error for when the shared config file failed to load. +type SharedConfigLoadError struct { + Filename string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigLoadError) Code() string { + return "SharedConfigLoadError" +} + +// Message is the description of the error +func (e SharedConfigLoadError) Message() string { + return fmt.Sprintf("failed to load config file, %s", e.Filename) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigLoadError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigLoadError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigProfileNotExistsError is an error for the shared config when +// the profile was not found in the config file. +type SharedConfigProfileNotExistsError struct { + Profile string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigProfileNotExistsError) Code() string { + return "SharedConfigProfileNotExistsError" +} + +// Message is the description of the error +func (e SharedConfigProfileNotExistsError) Message() string { + return fmt.Sprintf("failed to get profile, %s", e.Profile) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigProfileNotExistsError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigProfileNotExistsError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigAssumeRoleError is an error for the shared config when the +// profile contains assume role information, but that information is invalid +// or not complete. +type SharedConfigAssumeRoleError struct { + RoleARN string +} + +// Code is the short id of the error. +func (e SharedConfigAssumeRoleError) Code() string { + return "SharedConfigAssumeRoleError" +} + +// Message is the description of the error +func (e SharedConfigAssumeRoleError) Message() string { + return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials", + e.RoleARN) +} + +// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e SharedConfigAssumeRoleError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go new file mode 100644 index 00000000..244c86da --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// rules houses a set of rules needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and report whether any rule +// applies to the value; nested rules are supported +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for blacklist checks if the value is not within the blacklist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns whether a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } + return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 00000000..434ac872 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,761 @@ +// Package v4 implements signing for the AWS V4 signer +// +// Provides request signing for requests that need to be signed with +// AWS V4 Signatures. +// +// Standalone Signer +// +// Generally using the signer outside of the SDK should not require any additional +// logic when using Go v1.5 or higher. The signer does this by taking advantage +// of the URL.EscapedPath method. If your request URI requires additional escaping +// you may need to use URL.Opaque to define what the raw URI should be sent +// to the service as. +// +// The signer will first check the URL.Opaque field, and use its value if set. +// The signer does require the URL.Opaque field to be set in the form of: +// +// "//<hostname>/<path>" +// +// // e.g. +// "//example.com/some/path" +// +// The leading "//" and hostname are required or the URL.Opaque escaping will +// not work correctly.
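Before continuing with the fallback behavior described below, here is a minimal editorial sketch of the standalone flow using URL.Opaque with a pre-escaped path. The endpoint, path, and static credentials are placeholders, not values from the SDK:

	package main

	import (
		"net/http"
		"time"

		"github.com/aws/aws-sdk-go/aws/credentials"
		v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
	)

	func main() {
		// Build the request, then set URL.Opaque to the exact escaped
		// form the canonical request should use.
		req, _ := http.NewRequest("GET", "https://example.com/some/p@th", nil)
		req.URL.Opaque = "//example.com/some/p%40th"

		// Static credentials are for illustration only.
		signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
		if _, err := signer.Sign(req, nil, "s3", "us-east-1", time.Now()); err != nil {
			panic(err)
		}
	}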
+// + // If URL.Opaque is not set the signer will fall back to the URL.EscapedPath() + // method and use the returned value. If you're using Go v1.4 you must set + // URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with + // Go v1.5 the signer will fall back to URL.Path. + // + // AWS v4 signature validation requires that the canonical string's URI path + // element must be the URI escaped form of the HTTP request's path. + // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + // + // The Go HTTP client will perform escaping automatically on the request. Some + // of this escaping may cause signature validation errors because the HTTP + // request differs from the URI path or query for which the signature was generated. + // https://golang.org/pkg/net/url/#URL.EscapedPath + // + // Because of this, it is recommended that when using the signer outside of the + // SDK you explicitly escape the request prior to signing; this will help + // prevent signature validation errors. This can be done by setting + // the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then + // call URL.EscapedPath() if Opaque is not set. + // + // If signing a request intended for an HTTP/2 server, and you're using Go 1.6.2 + // through 1.7.4, you should use the URL.RawPath as the pre-escaped form of the + // request URL. https://github.com/golang/go/issues/16847 points to a bug in + // Go pre 1.8 that fails to make HTTP2 requests using an absolute URL in the HTTP + // message. URL.Opaque generally will force Go to make requests with absolute URL. + // URL.RawPath does not do this, but RawPath must be a valid escaping of Path + // or url.EscapedPath will ignore the RawPath escaping. + // + // Test `TestStandaloneSign` provides a complete example of using the signer + // outside of the SDK and pre-escaping the URI path. +package v4 + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for building canonical headers.
+var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedQueryHoisting is a whitelist for building query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. See + // aws.LogLevelType for more information on available logging levels + // + // By default nothing will be logged. + Debug aws.LogLevelType + + // The logger logging information will be written to. If the logger + // is nil, nothing will be logged. + Logger aws.Logger + + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // Disables the automatic escaping of the URI path of the request for the + // signature's canonical string's path. For services that do not need additional + // escaping, use this to disable the signer escaping the path. + // + // S3 is an example of a service that does not need additional escaping. + // + // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + DisableURIPathEscaping bool + + // Disables the automatic setting of the HTTP request's Body field with the + // io.ReadSeeker passed in to the signer.
+ // custom wrapper around the body for the io.ReadSeeker and want to preserve
+ // the Body value on the Request.Body.
+ //
+ // This does run the risk of signing a request with a body that will not be
+ // sent in the request. You need to ensure that the underlying data of the
+ // Body values are the same.
+ DisableRequestBodyOverwrite bool
+
+ // currentTimeFn returns the time value which represents the current time.
+ // This value should only be used for testing. If it is nil the default
+ // time.Now will be used.
+ currentTimeFn func() time.Time
+
+ // UnsignedPayload will prevent signing of the payload. This will only
+ // work for services that have support for this.
+ UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
+// default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+ v4 := &Signer{
+ Credentials: credentials,
+ }
+
+ for _, option := range options {
+ option(v4)
+ }
+
+ return v4
+}
+
+type signingCtx struct {
+ ServiceName string
+ Region string
+ Request *http.Request
+ Body io.ReadSeeker
+ Query url.Values
+ Time time.Time
+ ExpireTime time.Duration
+ SignedHeaderVals http.Header
+
+ DisableURIPathEscaping bool
+
+ credValues credentials.Value
+ isPresign bool
+ formattedTime string
+ formattedShortTime string
+ unsignedPayload bool
+
+ bodyDigest string
+ signedHeaders string
+ canonicalHeaders string
+ canonicalString string
+ credentialString string
+ stringToSign string
+ signature string
+ authorization string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil. It's important to note that this functionality will not
+// change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way that the header values on the
+// request will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+ return v4.signWithBody(r, body, service, region, 0, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// values instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+ return v4.signWithBody(r, body, service, region, exp, signTime)
+}
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+ currentTimeFn := v4.currentTimeFn
+ if currentTimeFn == nil {
+ currentTimeFn = time.Now
+ }
+
+ ctx := &signingCtx{
+ Request: r,
+ Body: body,
+ Query: r.URL.Query(),
+ Time: signTime,
+ ExpireTime: exp,
+ isPresign: exp != 0,
+ ServiceName: service,
+ Region: region,
+ DisableURIPathEscaping: v4.DisableURIPathEscaping,
+ unsignedPayload: v4.UnsignedPayload,
+ }
+
+ for key := range ctx.Query {
+ sort.Strings(ctx.Query[key])
+ }
+
+ if ctx.isRequestSigned() {
+ ctx.Time = currentTimeFn()
+ ctx.handlePresignRemoval()
+ }
+
+ var err error
+ ctx.credValues, err = v4.Credentials.Get()
+ if err != nil {
+ return http.Header{}, err
+ }
+
+ ctx.assignAmzQueryValues()
+ ctx.build(v4.DisableHeaderHoisting)
+
+ // If the request is not presigned the body should be attached to it. This
+ // prevents the confusion of wanting to send a signed request without
+ // the body the request was signed for attached.
+ if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
+ var reader io.ReadCloser
+ if body != nil {
+ var ok bool
+ if reader, ok = body.(io.ReadCloser); !ok {
+ reader = ioutil.NopCloser(body)
+ }
+ }
+ r.Body = reader
+ }
+
+ if v4.Debug.Matches(aws.LogDebugWithSigning) {
+ v4.logSigningInfo(ctx)
+ }
+
+ return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+ if !ctx.isPresign {
+ return
+ }
+
+ // The credentials have expired for this request. The current signing
+ // is invalid, and needs to be redone because the request will fail.
+ ctx.removePresign()
+
+ // Update the request's query string to ensure the values stay in
+ // sync in case retrieving the new credentials fails.
+ ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+ if ctx.credValues.SessionToken != "" {
+ ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+ } else {
+ ctx.Query.Del("X-Amz-Security-Token")
+ }
+
+ return
+ }
+
+ if ctx.credValues.SessionToken != "" {
+ ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+ }
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests with the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+ Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler is best used only with the SDK's built-in service clients'
+// API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+ signSDKRequestWithCurrTime(req, time.Now)
+}
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+ return request.NamedHandler{
+ Name: name,
+ Fn: func(req *request.Request) {
+ signSDKRequestWithCurrTime(req, time.Now, opts...)
+ },
+ }
+}
+
+func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
+ // Skip signing the request if the AnonymousCredentials object is used;
+ // such requests do not need to be signed.
+ if req.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ region := req.ClientInfo.SigningRegion
+ if region == "" {
+ region = aws.StringValue(req.Config.Region)
+ }
+
+ name := req.ClientInfo.SigningName
+ if name == "" {
+ name = req.ClientInfo.ServiceName
+ }
+
+ v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+ v4.Debug = req.Config.LogLevel.Value()
+ v4.Logger = req.Config.Logger
+ v4.DisableHeaderHoisting = req.NotHoist
+ v4.currentTimeFn = curTimeFn
+ if name == "s3" {
+ // S3 service should not have any escaping applied
+ v4.DisableURIPathEscaping = true
+ }
+ // Prevents setting the HTTPRequest's Body, since the Body could be
+ // wrapped in a custom io.Closer that we do not want the signer to
+ // stomp on.
+ v4.DisableRequestBodyOverwrite = true + }) + + for _, opt := range opts { + opt(v4) + } + + signingTime := req.Time + if !req.LastSignedAt.IsZero() { + signingTime = req.LastSignedAt + } + + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, signingTime, + ) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTimeFn() +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildBodyDigest() + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + "Signature=" + ctx.signature, + } + ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (ctx *signingCtx) buildTime() { + ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) + ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) + + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = strings.Join([]string{ + ctx.formattedShortTime, + ctx.Region, + ctx.ServiceName, + "aws4_request", + }, "/") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := 
ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + ctx.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + + ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + ctx.formattedTime, + ctx.credentialString, + hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + secret := ctx.credValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) + region := makeHmac(date, []byte(ctx.Region)) + service := makeHmac(region, []byte(ctx.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") { + hash = "UNSIGNED-PAYLOAD" + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + } + if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. 
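The doc comments earlier in this file describe using the signer standalone, outside of the SDK's request handlers. As a hedged illustration (an editorial sketch, not part of the vendored file), here is a minimal presign of an S3 GET with this package; the bucket, key, region, and credentials are placeholders:

    package main

    import (
        "fmt"
        "net/http"
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials"
        v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
    )

    func main() {
        // Placeholder static credentials; real code would use a provider chain.
        creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
        signer := v4.NewSigner(creds)

        req, err := http.NewRequest("GET", "https://example-bucket.s3.amazonaws.com/example-key", nil)
        if err != nil {
            panic(err)
        }

        // Presign for 15 minutes. A nil body is allowed; for presigned S3
        // requests buildBodyDigest falls back to UNSIGNED-PAYLOAD.
        headers, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now())
        if err != nil {
            panic(err)
        }

        fmt.Println(req.URL.String()) // shareable presigned URL
        fmt.Println(headers)          // headers that must accompany the request
    }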
+func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} + +const doubleSpaces = " " + +var doubleSpaceBytes = []byte(doubleSpaces) + +func stripExcessSpaces(headerVals []string) []string { + vals := make([]string, len(headerVals)) + for i, str := range headerVals { + // Trim leading and trailing spaces + trimmed := strings.TrimSpace(str) + + idx := strings.Index(trimmed, doubleSpaces) + var buf []byte + for idx > -1 { + // Multiple adjacent spaces found + if buf == nil { + // first time create the buffer + buf = []byte(trimmed) + } + + stripToIdx := -1 + for j := idx + 1; j < len(buf); j++ { + if buf[j] != ' ' { + buf = append(buf[:idx+1], buf[j:]...) + stripToIdx = j + break + } + } + + if stripToIdx >= 0 { + idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) + if idx >= 0 { + idx += stripToIdx + } + } else { + idx = -1 + } + } + + if buf != nil { + vals[i] = string(buf) + } else { + vals[i] = trimmed + } + } + return vals +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 00000000..0e2d864e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,118 @@ +package aws + +import ( + "io" + "sync" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should +// only be used with an io.Reader that is also an io.Seeker. Doing so may +// cause request signature errors, or request body's not sent for GET, HEAD +// and DELETE HTTP methods. +// +// Deprecated: Should only be used with io.ReadSeeker. If using for +// S3 PutObject to stream content use s3manager.Uploader instead. +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. 
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+ return t.Seek(offset, whence)
+ }
+ return int64(0), nil
+}
+
+// IsSeeker returns whether the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+ _, ok := r.r.(io.Seeker)
+ return ok
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+ switch t := r.r.(type) {
+ case io.Closer:
+ return t.Close()
+ }
+ return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
+// interface. It can be used with the s3manager.Downloader to download content
+// to a buffer in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+ buf []byte
+ m sync.Mutex
+
+ // GrowthCoeff defines the growth rate of the internal buffer. By
+ // default, the growth rate is 1, where expanding the internal
+ // buffer will allocate only enough capacity to fit the new expected
+ // length.
+ GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+ return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to a buffer starting at the position
+// provided. The number of bytes written, or an error, will be returned. It
+// can overwrite previously written slices if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+ pLen := len(p)
+ expLen := pos + int64(pLen)
+ b.m.Lock()
+ defer b.m.Unlock()
+ if int64(len(b.buf)) < expLen {
+ if int64(cap(b.buf)) < expLen {
+ if b.GrowthCoeff < 1 {
+ b.GrowthCoeff = 1
+ }
+ newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+ copy(newBuf, b.buf)
+ b.buf = newBuf
+ }
+ b.buf = b.buf[:expLen]
+ }
+ copy(b.buf[pos:], p)
+ return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.buf
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 00000000..f4897623
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.8.22"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 00000000..53831dff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+ "crypto/rand"
+ "fmt"
+ "reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken
+// which are not already set can be auto-filled.
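Before the idempotency helpers continue below, a minimal sketch of the WriteAtBuffer defined above (an editorial aside; the written strings are arbitrary placeholders):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        buf := aws.NewWriteAtBuffer(nil)

        // Out-of-order writes, as a concurrent downloader might issue them;
        // the buffer grows on demand to fit the furthest offset written.
        buf.WriteAt([]byte("world"), 6)
        buf.WriteAt([]byte("hello,"), 0)

        fmt.Printf("%q\n", buf.Bytes()) // "hello,world"
    }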
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+ switch u := v.Interface().(type) {
+ // To auto fill an idempotency token the field must be a string,
+ // tagged for auto fill, and have a zero value.
+ case *string:
+ return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ case string:
+ return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ }
+
+ return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+ b := make([]byte, 16)
+ RandReader.Read(b)
+
+ return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the value provided with an idempotency token,
+// given that the value can be set. It will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() && v.CanSet() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ v = reflect.Indirect(v)
+
+ if !v.CanSet() {
+ panic(fmt.Sprintf("unable to set idempotency token %v", v))
+ }
+
+ b := make([]byte, 16)
+ _, err := rand.Read(b)
+ if err != nil {
+ // TODO handle error
+ return
+ }
+
+ v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+ // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+ // 13th character is "4"
+ u[6] = (u[6] | 0x40) & 0x4F
+ // 17th character is "8", "9", "a", or "b"
+ u[8] = (u[8] | 0x80) & 0xBF
+
+ return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
new file mode 100644
index 00000000..18169f0f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -0,0 +1,36 @@
+// Package query provides serialization of AWS Query requests and responses.
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+
+import (
+ "net/url"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
+)
+
+// BuildHandler is a named request handler for building query protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
+
+// Build builds a request for an AWS Query service.
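To make the bit operations in UUIDVersion4 above concrete, here is a standalone sketch that mirrors them (rewritten locally since the protocol package lives under private/ and is not meant to be imported directly):

    package main

    import (
        "crypto/rand"
        "fmt"
    )

    // uuidV4 mirrors protocol.UUIDVersion4: stamp the version and variant
    // bits onto 16 random bytes, then print the 8-4-4-4-12 hex groups.
    func uuidV4(u []byte) string {
        u[6] = (u[6] | 0x40) & 0x4F // high nibble of byte 6 becomes 0x4 (version 4)
        u[8] = (u[8] | 0x80) & 0xBF // top bits of byte 8 become 10 (RFC 4122 variant)
        return fmt.Sprintf("%X-%X-%X-%X-%X", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
    }

    func main() {
        b := make([]byte, 16)
        if _, err := rand.Read(b); err != nil {
            panic(err)
        }
        fmt.Println(uuidV4(b))
    }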
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 00000000..524ca952 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,237 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 00000000..e0f4d5a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,35 @@ +package query + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
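As a hedged illustration of how queryutil.Parse above flattens a shape into url.Values: DemoInput is a hypothetical struct in the style of the SDK's generated shapes, and private/ packages are not intended for direct import.

    package main

    import (
        "fmt"
        "net/url"

        "github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
    )

    // DemoInput is a hypothetical shape; real shapes are generated by the SDK.
    type DemoInput struct {
        Name *string   `locationName:"Name" type:"string"`
        Tags []*string `locationName:"Tags" type:"list"`
    }

    func main() {
        name, tag := "demo", "prod"
        body := url.Values{
            "Action":  {"DescribeDemo"},
            "Version": {"2017-10-30"},
        }
        // Non-flattened lists gain a ".member.N" suffix, as parseList shows.
        if err := queryutil.Parse(body, &DemoInput{Name: &name, Tags: []*string{&tag}}, false); err != nil {
            panic(err)
        }
        fmt.Println(body.Encode())
        // Action=DescribeDemo&Name=demo&Tags.member.1=prod&Version=2017-10-30
    }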
+func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 00000000..f2142961 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,66 @@ +package query + +import ( + "encoding/xml" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type xmlServiceUnavailableResponse struct { + XMLName xml.Name `xml:"ServiceUnavailableException"` +} + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +// UnmarshalError unmarshals an error response for an AWS Query service. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) + return + } + + // First check for specific error + resp := xmlErrorResponse{} + decodeErr := xml.Unmarshal(bodyBytes, &resp) + if decodeErr == nil { + reqID := resp.RequestID + if reqID == "" { + reqID = r.RequestID + } + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) + return + } + + // Check for unhandled error + servUnavailResp := xmlServiceUnavailableResponse{} + unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) + if unavailErr == nil { + r.Error = awserr.NewRequestFailure( + awserr.New("ServiceUnavailableException", "service is unavailable", nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Failed to retrieve any error message from the response body + r.Error = awserr.New("SerializationError", + "failed to decode query XML error response", decodeErr) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 00000000..71618356 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,290 @@ +// Package rest provides RESTful serialization of AWS requests and responses. +package rest + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' 
|| + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, false) + buildBody(r, v) + } +} + +// BuildAsGET builds the REST component of a service request with the ability to hoist +// data from the body. +func BuildAsGET(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, true) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { + query := r.HTTPRequest.URL.Query() + + // Setup the raw path to match the base path pattern. This is needed + // so that when the path is mutated a custom escaped version can be + // stored in RawPath that will be used by the Go client. + r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if kind := m.Kind(); kind == reflect.Ptr { + m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } + } + if !m.IsValid() { + continue + } + if field.Tag.Get("ignore") != "" { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) 
+ if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + case aws.JSONValue: + b, err := json.Marshal(value) + if err != nil { + return "", err + } + if tag.Get("location") == "header" { + str = base64.StdEncoding.EncodeToString(b) + } else { + str = string(b) + } + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 00000000..4366de2e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. 
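The EscapePath helper above implements the Amazon-style escaping used for canonical paths; a small sketch of its two modes, with an arbitrary example path:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/private/protocol/rest"
    )

    func main() {
        // Keep "/" as a path separator...
        fmt.Println(rest.EscapePath("photos/my résumé.txt", false))
        // photos/my%20r%C3%A9sum%C3%A9.txt

        // ...or escape it too, e.g. when the value fills a single URI segment.
        fmt.Println(rest.EscapePath("photos/my résumé.txt", true))
        // photos%2Fmy%20r%C3%A9sum%C3%A9.txt
    }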
+func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 00000000..7a779ee2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,227 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. 
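To illustrate how PayloadType above reads the "_" marker field's payload tag, a sketch with DemoOutput, a hypothetical shape mimicking the SDK's generated types:

    package main

    import (
        "fmt"
        "io"

        "github.com/aws/aws-sdk-go/private/protocol/rest"
    )

    // DemoOutput is a hypothetical shape; "Body" is declared as its payload.
    type DemoOutput struct {
        _    struct{}      `payload:"Body"`
        Body io.ReadCloser `type:"blob"`
    }

    func main() {
        // A "blob" payload tells the REST unmarshaler to hand the HTTP
        // response body to the shape rather than decode it field by field.
        fmt.Println(rest.PayloadType(&DemoOutput{})) // blob
    }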
+func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", + "failed to read response body", err) + return + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + defer r.HTTPResponse.Body.Close() + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = 
http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { + isJSONValue := tag.Get("type") == "jsonvalue" + if isJSONValue { + if len(header) == 0 { + return nil + } + } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + b := []byte(header) + var err error + if tag.Get("location") == "header" { + b, err = base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + } + + m := aws.JSONValue{} + err = json.Unmarshal(b, &m) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 00000000..7bdf4c85 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -0,0 +1,69 @@ +// Package restxml provides RESTful XML serialization of AWS +// requests and responses. +package restxml + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// BuildHandler is a named request handler for building restxml protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request payload for the REST XML protocol. 
+func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +// Unmarshal unmarshals a payload response for the REST XML protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + return + } + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST XML protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST XML protocol. +func UnmarshalError(r *request.Request) { + query.UnmarshalError(r) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 00000000..da1a6811 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,21 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. +func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 00000000..7091b456 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,296 @@ +// Package xmlutil provides XML serialization of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. +// Error will be returned if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, false) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. 
+// Will build the value according to its specific type:
+// struct, list, map, or scalar.
+//
+// Also takes a "type" tag value to specify what type the value should be
+// converted to as an XMLNode. If the type is not provided, reflection will be
+// used to determine the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ value = elemOf(value)
+ if !value.IsValid() { // no need to handle zero values
+ return nil
+ } else if tag.Get("location") != "" { // don't handle non-body location values
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := value.Type().FieldByName("_"); ok {
+ tag = tag + reflect.StructTag(" ") + field.Tag
+ }
+ return b.buildStruct(value, current, tag)
+ case "list":
+ return b.buildList(value, current, tag)
+ case "map":
+ return b.buildMap(value, current, tag)
+ default:
+ return b.buildScalar(value, current, tag)
+ }
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields
+// and any nested types are converted to XMLNodes also.
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ fieldAdded := false
+
+ // unwrap payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := value.Type().FieldByName(payload)
+ tag = field.Tag
+ value = elemOf(value.FieldByName(payload))
+
+ if !value.IsValid() {
+ return nil
+ }
+ }
+
+ child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+ // if there is an xmlNamespace associated with this struct
+ if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+ ns := xml.Attr{
+ Name: xml.Name{Local: "xmlns"},
+ Value: uri,
+ }
+ if prefix != "" {
+ b.namespaces[prefix] = uri // register the namespace
+ ns.Name.Local = "xmlns:" + prefix
+ }
+
+ child.Attr = append(child.Attr, ns)
+ }
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ member := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ mTag := field.Tag
+ if mTag.Get("location") != "" { // skip non-body members
+ continue
+ }
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ member = reflect.ValueOf(token)
+ }
+
+ memberName := mTag.Get("locationName")
+ if memberName == "" {
+ memberName = field.Name
+ mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+ }
+ if err := b.buildValue(member, child, mTag); err != nil {
+ return err
+ }
+
+ fieldAdded = true
+ }
+
+ if fieldAdded { // only append this child if we have one or more valid members
+ current.AddChild(child)
+ }
+
+ return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as children nodes. All
+// nested values in the list are converted to XMLNodes also.
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if value.IsNil() { // don't build omitted lists
+		return nil
+	}
+
+	// check whether the list is flattened
+	flattened := tag.Get("flattened") != ""
+
+	xname := xml.Name{Local: tag.Get("locationName")}
+	if flattened {
+		for i := 0; i < value.Len(); i++ {
+			child := NewXMLElement(xname)
+			current.AddChild(child)
+			if err := b.buildValue(value.Index(i), child, ""); err != nil {
+				return err
+			}
+		}
+	} else {
+		list := NewXMLElement(xname)
+		current.AddChild(list)
+
+		for i := 0; i < value.Len(); i++ {
+			iname := tag.Get("locationNameList")
+			if iname == "" {
+				iname = "member"
+			}
+
+			child := NewXMLElement(xml.Name{Local: iname})
+			list.AddChild(child)
+			if err := b.buildValue(value.Index(i), child, ""); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
+// nested values in the map are converted to XMLNodes also.
+//
+// An error will be returned if it is unable to build the map's values into XMLNodes.
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if value.IsNil() { // don't build omitted maps
+		return nil
+	}
+
+	maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+	current.AddChild(maproot)
+	current = maproot
+
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	// sorting is not required for compliance, but it makes testing easier
+	keys := make([]string, value.Len())
+	for i, k := range value.MapKeys() {
+		keys[i] = k.String()
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		v := value.MapIndex(reflect.ValueOf(k))
+
+		mapcur := current
+		if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+			child := NewXMLElement(xml.Name{Local: "entry"})
+			mapcur.AddChild(child)
+			mapcur = child
+		}
+
+		kchild := NewXMLElement(xml.Name{Local: kname})
+		kchild.Text = k
+		vchild := NewXMLElement(xml.Name{Local: vname})
+		mapcur.AddChild(kchild)
+		mapcur.AddChild(vchild)
+
+		if err := b.buildValue(v, vchild, ""); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// buildScalar will convert the value into a string and append it as an attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if the tag contains an "xmlAttribute" attribute value.
+//
+// An error will be returned if the value type is unsupported.
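+//
+// Editor's sketch (illustrative; the fields and tags are hypothetical):
+//
+//	Count *int64     `locationName:"Count" type:"integer"`   // <Count>3</Count>
+//	When  *time.Time `locationName:"When" type:"timestamp"`  // <When>2015-01-02T15:04:05Z</When>
+//	ID    *string    `locationName:"ID" xmlAttribute:"true"` // ID="..." attribute on the current element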
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	var str string
+	switch converted := value.Interface().(type) {
+	case string:
+		str = converted
+	case []byte:
+		if !value.IsNil() {
+			str = base64.StdEncoding.EncodeToString(converted)
+		}
+	case bool:
+		str = strconv.FormatBool(converted)
+	case int64:
+		str = strconv.FormatInt(converted, 10)
+	case int:
+		str = strconv.Itoa(converted)
+	case float64:
+		str = strconv.FormatFloat(converted, 'f', -1, 64)
+	case float32:
+		str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+	case time.Time:
+		const ISO8601UTC = "2006-01-02T15:04:05Z"
+		str = converted.UTC().Format(ISO8601UTC)
+	default:
+		return fmt.Errorf("unsupported value for param %s: %v (%s)",
+			tag.Get("locationName"), value.Interface(), value.Type().Name())
+	}
+
+	xname := xml.Name{Local: tag.Get("locationName")}
+	if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
+		attr := xml.Attr{Name: xname, Value: str}
+		current.Attr = append(current.Attr, attr)
+	} else { // regular text node
+		current.AddChild(&XMLNode{Name: xname, Text: str})
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
new file mode 100644
index 00000000..87584628
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -0,0 +1,260 @@
+package xmlutil
+
+import (
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// UnmarshalXML deserializes an xml.Decoder into the container v. The shape
+// of v needs to match the shape of the XML expected to be decoded.
+// If the shapes don't match, unmarshaling will fail.
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
+	n, err := XMLToStruct(d, nil)
+	if err != nil {
+		return err
+	}
+	if n.Children != nil {
+		for _, root := range n.Children {
+			for _, c := range root {
+				if wrappedChild, ok := c.Children[wrapper]; ok {
+					c = wrappedChild[0] // pull out wrapped element
+				}
+
+				err = parse(reflect.ValueOf(v), c, "")
+				if err != nil {
+					if err == io.EOF {
+						return nil
+					}
+					return err
+				}
+			}
+		}
+		return nil
+	}
+	return nil
+}
+
+// parse deserializes any value from an XMLNode. The type tag is used to infer the type, or
+// reflection is used to determine the type from r.
+func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	rtype := r.Type()
+	if rtype.Kind() == reflect.Ptr {
+		rtype = rtype.Elem() // check kind of actual element type
+	}
+
+	t := tag.Get("type")
+	if t == "" {
+		switch rtype.Kind() {
+		case reflect.Struct:
+			t = "structure"
+		case reflect.Slice:
+			t = "list"
+		case reflect.Map:
+			t = "map"
+		}
+	}
+
+	switch t {
+	case "structure":
+		if field, ok := rtype.FieldByName("_"); ok {
+			tag = field.Tag
+		}
+		return parseStruct(r, node, tag)
+	case "list":
+		return parseList(r, node, tag)
+	case "map":
+		return parseMap(r, node, tag)
+	default:
+		return parseScalar(r, node, tag)
+	}
+}
+
+// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
+// types in the structure will also be deserialized.
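+//
+// Editor's sketch (illustrative; the struct is hypothetical): given
+//
+//	type Person struct {
+//		_    struct{} `type:"structure"`
+//		Name *string  `locationName:"FullName" type:"string"`
+//	}
+//
+// a child element <FullName>Jane</FullName> is matched to the Name field by
+// its locationName tag; unexported fields are skipped entirely.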
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+	if r.Kind() == reflect.Ptr {
+		if r.IsNil() { // create the structure if it's nil
+			s := reflect.New(r.Type().Elem())
+			r.Set(s)
+			r = s
+		}
+
+		r = r.Elem()
+		t = t.Elem()
+	}
+
+	// unwrap any payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := t.FieldByName(payload)
+		return parseStruct(r.FieldByName(payload), node, field.Tag)
+	}
+
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		if c := field.Name[0:1]; strings.ToLower(c) == c {
+			continue // ignore unexported fields
+		}
+
+		// figure out what this field is called
+		name := field.Name
+		if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+			name = field.Tag.Get("locationNameList")
+		} else if locName := field.Tag.Get("locationName"); locName != "" {
+			name = locName
+		}
+
+		// try to find the field by name in elements
+		elems := node.Children[name]
+
+		if elems == nil { // try to find the field in attributes
+			if val, ok := node.findElem(name); ok {
+				elems = []*XMLNode{{Text: val}}
+			}
+		}
+
+		member := r.FieldByName(field.Name)
+		for _, elem := range elems {
+			err := parse(member, elem, field.Tag)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+
+	if tag.Get("flattened") == "" { // look at all item entries
+		mname := "member"
+		if name := tag.Get("locationNameList"); name != "" {
+			mname = name
+		}
+
+		if Children, ok := node.Children[mname]; ok {
+			if r.IsNil() {
+				r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+			}
+
+			for i, c := range Children {
+				err := parse(r.Index(i), c, "")
+				if err != nil {
+					return err
+				}
+			}
+		}
+	} else { // flattened list means this is a single element
+		if r.IsNil() {
+			r.Set(reflect.MakeSlice(t, 0, 0))
+		}
+
+		childR := reflect.Zero(t.Elem())
+		r.Set(reflect.Append(r, childR))
+		err := parse(r.Index(r.Len()-1), node, "")
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	if r.IsNil() {
+		r.Set(reflect.MakeMap(r.Type()))
+	}
+
+	if tag.Get("flattened") == "" { // look at all child entries
+		for _, entry := range node.Children["entry"] {
+			if err := parseMapEntry(r, entry, tag); err != nil {
+				return err
+			}
+		}
+	} else { // this element is itself an entry
+		return parseMapEntry(r, node, tag)
+	}
+
+	return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	keys, ok := node.Children[kname]
+	values := node.Children[vname]
+	if ok {
+		for i, key := range keys {
+			keyR := reflect.ValueOf(key.Text)
+			value := values[i]
+			valueR := reflect.New(r.Type().Elem()).Elem()
+
+			if err := parse(valueR, value, ""); err != nil {
+				return err
+			}
+			r.SetMapIndex(keyR, valueR)
+		}
+	}
+	return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// An error is returned if the deserialization fails due to an invalid type conversion
+// or an unsupported interface type.
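+//
+// Editor's sketch (illustrative): the node's text is converted according to
+// the Go type of r, e.g.
+//
+//	<Count>3</Count>                  into *int64     via strconv.ParseInt
+//	<Enabled>true</Enabled>           into *bool      via strconv.ParseBool
+//	<When>2015-01-02T15:04:05Z</When> into *time.Time via time.Parse (ISO 8601 UTC)
+//
+// Any other target type results in an "unsupported value" error.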
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	switch r.Interface().(type) {
+	case *string:
+		r.Set(reflect.ValueOf(&node.Text))
+		return nil
+	case []byte:
+		b, err := base64.StdEncoding.DecodeString(node.Text)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(b))
+	case *bool:
+		v, err := strconv.ParseBool(node.Text)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&v))
+	case *int64:
+		v, err := strconv.ParseInt(node.Text, 10, 64)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&v))
+	case *float64:
+		v, err := strconv.ParseFloat(node.Text, 64)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&v))
+	case *time.Time:
+		const ISO8601UTC = "2006-01-02T15:04:05Z"
+		t, err := time.Parse(ISO8601UTC, node.Text)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&t))
+	default:
+		return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644
index 00000000..3e970b62
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,147 @@
+package xmlutil
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"sort"
+)
+
+// An XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+	Name     xml.Name              `json:",omitempty"`
+	Children map[string][]*XMLNode `json:",omitempty"`
+	Text     string                `json:",omitempty"`
+	Attr     []xml.Attr            `json:",omitempty"`
+
+	namespaces map[string]string
+	parent     *XMLNode
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+	return &XMLNode{
+		Name:     name,
+		Children: map[string][]*XMLNode{},
+		Attr:     []xml.Attr{},
+	}
+}
+
+// AddChild adds child to the XMLNode.
+func (n *XMLNode) AddChild(child *XMLNode) {
+	if _, ok := n.Children[child.Name.Local]; !ok {
+		n.Children[child.Name.Local] = []*XMLNode{}
+	}
+	n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
+}
+
+// XMLToStruct converts an xml.Decoder stream into an XMLNode with nested values.
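+//
+// Editor's sketch (illustrative): decoding
+//
+//	<Parent><Child>a</Child><Child>b</Child></Parent>
+//
+// yields a root node whose Children["Parent"][0] node in turn has
+// Children["Child"] with two entries whose Text fields are "a" and "b".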
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if err != nil { + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + out.findNamespaces() + if e != nil { + return out, e + } + node.Name = typed.Name + node.findNamespaces() + tempOut := *out + // Save into a temp variable, simply because out gets squashed during + // loop iterations + node.parent = &tempOut + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + out = &XMLNode{} + } + } + return out, nil +} + +func (n *XMLNode) findNamespaces() { + ns := map[string]string{} + for _, a := range n.Attr { + if a.Name.Space == "xmlns" { + ns[a.Value] = a.Name.Local + } + } + + n.namespaces = ns +} + +func (n *XMLNode) findElem(name string) (string, bool) { + for node := n; node != nil; node = node.parent { + for _, a := range node.Attr { + namespace := a.Name.Space + if v, ok := node.namespaces[namespace]; ok { + namespace = v + } + if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { + return a.Value, true + } + } + } + return "", false +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. +func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go new file mode 100644 index 00000000..52ac02ca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -0,0 +1,19245 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +import ( + "fmt" + "io" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AbortMultipartUpload for usage and error information. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AbortMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AbortMultipartUploadRequest method. +// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + output = &AbortMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// AbortMultipartUpload API operation for Amazon Simple Storage Service. +// +// Aborts a multipart upload. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the List Parts operation and ensure the +// parts list is empty. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation AbortMultipartUpload for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchUpload "NoSuchUpload" +// The specified multipart upload does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + return out, req.Send() +} + +// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See AbortMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CompleteMultipartUpload for usage and error information. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CompleteMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteMultipartUploadRequest method. +// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + output = &CompleteMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CompleteMultipartUpload API operation for Amazon Simple Storage Service. +// +// Completes a multipart upload by assembling previously uploaded parts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CompleteMultipartUpload for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + return out, req.Send() +} + +// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a "aws/request.Request" representing the +// client's request for the CopyObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CopyObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the CopyObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyObjectRequest method. +// req, resp := client.CopyObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + output = &CopyObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyObject API operation for Amazon Simple Storage Service. +// +// Creates a copy of an object that is already stored in Amazon S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CopyObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" +// The source object of the COPY operation is not in the active tier and is +// only stored in Amazon Glacier. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + return out, req.Send() +} + +// CopyObjectWithContext is the same as CopyObject with the addition of +// the ability to pass a context and additional request options. +// +// See CopyObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateBucket for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateBucketRequest method. 
+// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + output = &CreateBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBucket API operation for Amazon Simple Storage Service. +// +// Creates a new bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBucketAlreadyExists "BucketAlreadyExists" +// The requested bucket name is not available. The bucket namespace is shared +// by all users of the system. Please select a different name and try again. +// +// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + return out, req.Send() +} + +// CreateBucketWithContext is the same as CreateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateMultipartUpload = "CreateMultipartUpload" + +// CreateMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateMultipartUpload for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateMultipartUploadRequest method. 
+// req, resp := client.CreateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + output = &CreateMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMultipartUpload API operation for Amazon Simple Storage Service. +// +// Initiates a multipart upload and returns an upload ID. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateMultipartUpload for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + return out, req.Send() +} + +// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucket for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketRequest method. 
+// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + output = &DeleteBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucket API operation for Amazon Simple Storage Service. +// +// Deletes the bucket. All objects (including all object versions and Delete +// Markers) in the bucket must be deleted before the bucket itself can be deleted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucket for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + return out, req.Send() +} + +// DeleteBucketWithContext is the same as DeleteBucket with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" + +// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketAnalyticsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketAnalyticsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketAnalyticsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &DeleteBucketAnalyticsConfigurationInput{} + } + + output = &DeleteBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketAnalyticsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketCors for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketCors method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketCorsRequest method. +// req, resp := client.DeleteBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + output = &DeleteBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketCors API operation for Amazon Simple Storage Service. +// +// Deletes the cors configuration information set for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketCors for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + return out, req.Send() +} + +// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" + +// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketInventoryConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketInventoryConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. 
+// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketInventoryConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &DeleteBucketInventoryConfigurationInput{} + } + + output = &DeleteBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an inventory configuration (identified by the inventory ID) from +// the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketInventoryConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketLifecycle for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketLifecycle method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. +// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + output = &DeleteBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deletes the lifecycle configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketLifecycle for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + return out, req.Send() +} + +// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" + +// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketMetricsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketMetricsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. +// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketMetricsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &DeleteBucketMetricsConfigurationInput{} + } + + output = &DeleteBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes a metrics configuration (specified by the metrics configuration ID) +// from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketMetricsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketPolicy for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketPolicy method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketPolicyRequest method. +// req, resp := client.DeleteBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { + op := &request.Operation{ + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + output = &DeleteBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketPolicy API operation for Amazon Simple Storage Service. +// +// Deletes the policy from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketPolicy for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + return out, req.Send() +} + +// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketReplication for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketReplicationRequest method. 
+// req, resp := client.DeleteBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { + op := &request.Operation{ + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + output = &DeleteBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketReplication API operation for Amazon Simple Storage Service. +// +// Deletes the replication configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketReplication for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + return out, req.Send() +} + +// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketTaggingRequest method. 
+// req, resp := client.DeleteBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + output = &DeleteBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketTagging API operation for Amazon Simple Storage Service. +// +// Deletes the tags from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketTagging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + return out, req.Send() +} + +// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketWebsite for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. 
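+// A params value for this sketch (assumed bucket name); a successful call
+// returns an empty DeleteBucketWebsiteOutput:
+//
+// params := &s3.DeleteBucketWebsiteInput{
+//     Bucket: aws.String("example-bucket"),
+// }
+//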
+// req, resp := client.DeleteBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + output = &DeleteBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketWebsite API operation for Amazon Simple Storage Service. +// +// This operation removes the website configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketWebsite for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + return out, req.Send() +} + +// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectRequest method. 
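+// A params value for this sketch (bucket and key are assumed placeholders):
+//
+// params := &s3.DeleteObjectInput{
+//     Bucket: aws.String("example-bucket"), // assumed
+//     Key:    aws.String("example-key"),    // assumed
+//     // Setting VersionId deletes that specific version instead of
+//     // inserting a delete marker.
+// }
+//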
+// req, resp := client.DeleteObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + output = &DeleteObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObject API operation for Amazon Simple Storage Service. +// +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObject for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + return out, req.Send() +} + +// DeleteObjectWithContext is the same as DeleteObject with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObjectTagging = "DeleteObjectTagging" + +// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjectTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteObjectTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObjectTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectTaggingRequest method. 
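+// A params value for this sketch (bucket and key are assumed placeholders);
+// VersionId may optionally target the tag-set of a specific object version:
+//
+// params := &s3.DeleteObjectTaggingInput{
+//     Bucket: aws.String("example-bucket"), // assumed
+//     Key:    aws.String("example-key"),    // assumed
+// }
+//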
+// req, resp := client.DeleteObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) { + op := &request.Operation{ + Name: opDeleteObjectTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &DeleteObjectTaggingInput{} + } + + output = &DeleteObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObjectTagging API operation for Amazon Simple Storage Service. +// +// Removes the tag-set from an existing object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjectTagging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + return out, req.Send() +} + +// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObjects = "DeleteObjects" + +// DeleteObjectsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteObjects for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectsRequest method. 
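+// A params value for this sketch (names are assumed placeholders); a single
+// request may carry up to 1000 keys:
+//
+// params := &s3.DeleteObjectsInput{
+//     Bucket: aws.String("example-bucket"), // assumed
+//     Delete: &s3.Delete{
+//         Objects: []*s3.ObjectIdentifier{
+//             {Key: aws.String("a.txt")}, // assumed keys
+//             {Key: aws.String("b.txt")},
+//         },
+//         Quiet: aws.Bool(true), // report only failed deletions
+//     },
+// }
+//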
+// req, resp := client.DeleteObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { + op := &request.Operation{ + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + output = &DeleteObjectsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObjects API operation for Amazon Simple Storage Service. +// +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. You may specify up to 1000 keys. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjects for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + return out, req.Send() +} + +// DeleteObjectsWithContext is the same as DeleteObjects with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" + +// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccelerateConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketAccelerateConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAccelerateConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. 
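+// A params value for this sketch (assumed bucket name); on success the
+// response's Status field reports "Enabled" or "Suspended":
+//
+// params := &s3.GetBucketAccelerateConfigurationInput{
+//     Bucket: aws.String("example-bucket"),
+// }
+//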
+// req, resp := client.GetBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAccelerateConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &GetBucketAccelerateConfigurationInput{} + } + + output = &GetBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the accelerate configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAccelerateConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketAcl for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAclRequest method. 
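+// A params value for this sketch (assumed bucket name); the response carries
+// the bucket's Owner and its list of Grants:
+//
+// params := &s3.GetBucketAclInput{
+//     Bucket: aws.String("example-bucket"),
+// }
+//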
+// req, resp := client.GetBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + output = &GetBucketAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAcl API operation for Amazon Simple Storage Service. +// +// Gets the access control policy for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAcl for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + return out, req.Send() +} + +// GetBucketAclWithContext is the same as GetBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" + +// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketAnalyticsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAnalyticsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. 
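+// A params value for this sketch; the bucket name and analytics
+// configuration ID are assumed placeholders:
+//
+// params := &s3.GetBucketAnalyticsConfigurationInput{
+//     Bucket: aws.String("example-bucket"),
+//     Id:     aws.String("example-analytics-id"),
+// }
+//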
+// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAnalyticsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &GetBucketAnalyticsConfigurationInput{} + } + + output = &GetBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Gets an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAnalyticsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketCors for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketCorsRequest method. 
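+// A params value for this sketch (assumed bucket name); the response's
+// CORSRules field holds the configured rules:
+//
+// params := &s3.GetBucketCorsInput{
+//     Bucket: aws.String("example-bucket"),
+// }
+//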
+// req, resp := client.GetBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + output = &GetBucketCorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketCors API operation for Amazon Simple Storage Service. +// +// Returns the cors configuration for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketCors for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + return out, req.Send() +} + +// GetBucketCorsWithContext is the same as GetBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" + +// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketInventoryConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketInventoryConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketInventoryConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketInventoryConfigurationRequest method. 
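+// A params value for this sketch; the bucket name and inventory
+// configuration ID are assumed placeholders:
+//
+// params := &s3.GetBucketInventoryConfigurationInput{
+//     Bucket: aws.String("example-bucket"),
+//     Id:     aws.String("example-inventory-id"),
+// }
+//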
+// req, resp := client.GetBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketInventoryConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &GetBucketInventoryConfigurationInput{} + } + + output = &GetBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Returns an inventory configuration (identified by the inventory ID) from +// the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketInventoryConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketLifecycle for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLifecycleRequest method. 
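+// A params value for this sketch (assumed bucket name); as the operation
+// documentation below notes, this API is deprecated, so new code would
+// normally call GetBucketLifecycleConfiguration instead:
+//
+// params := &s3.GetBucketLifecycleInput{
+//     Bucket: aws.String("example-bucket"),
+// }
+//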
+// req, resp := client.GetBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + output = &GetBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deprecated, see the GetBucketLifecycleConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycle for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketLifecycleConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLifecycleConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. 
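+// A params value for this sketch (assumed bucket name); the response's
+// Rules field holds the bucket's lifecycle rules:
+//
+// params := &s3.GetBucketLifecycleConfigurationInput{
+//     Bucket: aws.String("example-bucket"),
+// }
+//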
+// req, resp := client.GetBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + output = &GetBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the lifecycle configuration information set on the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycleConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketLocation for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLocation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLocationRequest method. 
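+// A params value for this sketch (assumed bucket name); note that the
+// response's LocationConstraint is the empty string for buckets in
+// us-east-1:
+//
+// params := &s3.GetBucketLocationInput{
+//     Bucket: aws.String("example-bucket"),
+// }
+//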
+// req, resp := client.GetBucketLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + output = &GetBucketLocationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLocation API operation for Amazon Simple Storage Service. +// +// Returns the region the bucket resides in. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLocation for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + return out, req.Send() +} + +// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketLogging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLoggingRequest method. 
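+// A params value for this sketch (assumed bucket name); when logging is
+// disabled the response's LoggingEnabled field is nil:
+//
+// params := &s3.GetBucketLoggingInput{
+//     Bucket: aws.String("example-bucket"),
+// }
+//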
+// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + output = &GetBucketLoggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLogging API operation for Amazon Simple Storage Service. +// +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLogging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + return out, req.Send() +} + +// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" + +// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketMetricsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketMetricsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketMetricsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketMetricsConfigurationRequest method. 
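+// A params value for this sketch; the bucket name and metrics configuration
+// ID are assumed placeholders:
+//
+// params := &s3.GetBucketMetricsConfigurationInput{
+//     Bucket: aws.String("example-bucket"),
+//     Id:     aws.String("example-metrics-id"),
+// }
+//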
+// req, resp := client.GetBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketMetricsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &GetBucketMetricsConfigurationInput{} + } + + output = &GetBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Gets a metrics configuration (specified by the metrics configuration ID) +// from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketMetricsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotification operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketNotification for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketNotification method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketNotificationRequest method. 
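+// A params value for this sketch (assumed bucket name); note that this
+// deprecated operation shares the GetBucketNotificationConfigurationRequest
+// input type with its replacement:
+//
+// params := &s3.GetBucketNotificationConfigurationRequest{
+//     Bucket: aws.String("example-bucket"),
+// }
+//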
+// req, resp := client.GetBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfigurationDeprecated{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotification API operation for Amazon Simple Storage Service. +// +// Deprecated, see the GetBucketNotificationConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotification for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketNotificationConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketNotificationConfigurationRequest method. 
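+// A params value for this sketch (assumed bucket name):
+//
+// params := &s3.GetBucketNotificationConfigurationRequest{
+//     Bucket: aws.String("example-bucket"),
+// }
+//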
+// req, resp := client.GetBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfiguration{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the notification configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotificationConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketPolicy for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketPolicyRequest method. 
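+// A params value for this sketch (assumed bucket name); the response's
+// Policy field is the policy document as a JSON string:
+//
+// params := &s3.GetBucketPolicyInput{
+//     Bucket: aws.String("example-bucket"),
+// }
+//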
+// req, resp := client.GetBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + output = &GetBucketPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicy API operation for Amazon Simple Storage Service. +// +// Returns the policy of a specified bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicy for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + return out, req.Send() +} + +// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketReplication for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketReplicationRequest method. 
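+// A params value for this sketch (assumed bucket name); the response wraps
+// the bucket's ReplicationConfiguration:
+//
+// params := &s3.GetBucketReplicationInput{
+//     Bucket: aws.String("example-bucket"),
+// }
+//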
+// req, resp := client.GetBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { + op := &request.Operation{ + Name: opGetBucketReplication, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &GetBucketReplicationInput{} + } + + output = &GetBucketReplicationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketReplication API operation for Amazon Simple Storage Service. +// +// Returns the replication configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketReplication for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + return out, req.Send() +} + +// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketRequestPayment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketRequestPayment for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketRequestPayment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketRequestPaymentRequest method. 
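+// // Hedged input sketch (bucket name assumed):
+// params := &s3.GetBucketRequestPaymentInput{Bucket: aws.String("my-bucket")}
+//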
+// req, resp := client.GetBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + output = &GetBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Returns the request payment configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketRequestPayment for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketTaggingRequest method. 
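+// // Hedged input sketch (bucket name assumed); resp.TagSet holds the tags
+// // once Send succeeds:
+// params := &s3.GetBucketTaggingInput{Bucket: aws.String("my-bucket")}
+//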
+// req, resp := client.GetBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + output = &GetBucketTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketTagging API operation for Amazon Simple Storage Service. +// +// Returns the tag set associated with the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketTagging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + return out, req.Send() +} + +// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketVersioning for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketVersioning method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketVersioningRequest method. 
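+// // Hedged input sketch (bucket name assumed); on failure, err can be
+// // inspected via a type assertion to awserr.Error as described above:
+// params := &s3.GetBucketVersioningInput{Bucket: aws.String("my-bucket")}
+//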
+// req, resp := client.GetBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + output = &GetBucketVersioningOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketVersioning API operation for Amazon Simple Storage Service. +// +// Returns the versioning state of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketVersioning for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + return out, req.Send() +} + +// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketWebsite = "GetBucketWebsite" + +// GetBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketWebsite for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketWebsiteRequest method. 
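+// // Hedged input sketch (bucket name assumed); a WithContext variant of the
+// // call is also available for cancellation:
+// params := &s3.GetBucketWebsiteInput{Bucket: aws.String("my-bucket")}
+//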
+// req, resp := client.GetBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { + op := &request.Operation{ + Name: opGetBucketWebsite, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &GetBucketWebsiteInput{} + } + + output = &GetBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketWebsite API operation for Amazon Simple Storage Service. +// +// Returns the website configuration for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketWebsite for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + return out, req.Send() +} + +// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a "aws/request.Request" representing the +// client's request for the GetObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectRequest method. 
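+// // Hedged input sketch (bucket and key assumed); close resp.Body when done
+// // reading the object data:
+// params := &s3.GetObjectInput{
+//     Bucket: aws.String("my-bucket"),
+//     Key:    aws.String("path/to/object"),
+// }
+//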
+// req, resp := client.GetObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + output = &GetObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObject API operation for Amazon Simple Storage Service. +// +// Retrieves objects from Amazon S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + return out, req.Send() +} + +// GetObjectWithContext is the same as GetObject with the addition of +// the ability to pass a context and additional request options. +// +// See GetObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetObjectAcl for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectAclRequest method. 
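+// // Hedged input sketch (bucket and key assumed):
+// params := &s3.GetObjectAclInput{
+//     Bucket: aws.String("my-bucket"),
+//     Key:    aws.String("path/to/object"),
+// }
+//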
+// req, resp := client.GetObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + output = &GetObjectAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectAcl API operation for Amazon Simple Storage Service. +// +// Returns the access control list (ACL) of an object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAcl for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + return out, req.Send() +} + +// GetObjectAclWithContext is the same as GetObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTagging = "GetObjectTagging" + +// GetObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetObjectTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectTaggingRequest method. 
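+// // Hedged input sketch (bucket and key assumed); VersionId may optionally
+// // target a specific object version:
+// params := &s3.GetObjectTaggingInput{
+//     Bucket: aws.String("my-bucket"),
+//     Key:    aws.String("path/to/object"),
+// }
+//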
+// req, resp := client.GetObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { + op := &request.Operation{ + Name: opGetObjectTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &GetObjectTaggingInput{} + } + + output = &GetObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTagging API operation for Amazon Simple Storage Service. +// +// Returns the tag-set of an object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTagging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + return out, req.Send() +} + +// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetObjectTorrent for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectTorrent method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectTorrentRequest method. 
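+// // Hedged input sketch (bucket and key assumed):
+// params := &s3.GetObjectTorrentInput{
+//     Bucket: aws.String("my-bucket"),
+//     Key:    aws.String("path/to/object"),
+// }
+//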
+// req, resp := client.GetObjectTorrentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + output = &GetObjectTorrentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTorrent API operation for Amazon Simple Storage Service. +// +// Returns torrent files from a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTorrent for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + return out, req.Send() +} + +// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTorrent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See HeadBucket for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the HeadBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the HeadBucketRequest method.
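+// // Hedged input sketch (bucket name assumed); a nil error from Send is the
+// // usual signal that the bucket exists and is accessible:
+// params := &s3.HeadBucketInput{Bucket: aws.String("my-bucket")}
+//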
+// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + output = &HeadBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// HeadBucket API operation for Amazon Simple Storage Service. +// +// This operation is useful to determine if a bucket exists and you have permission +// to access it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + return out, req.Send() +} + +// HeadBucketWithContext is the same as HeadBucket with the addition of +// the ability to pass a context and additional request options. +// +// See HeadBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See HeadObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the HeadObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the HeadObjectRequest method. 
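+// // Hedged input sketch (bucket and key assumed); resp carries metadata such
+// // as ContentLength and ETag:
+// params := &s3.HeadObjectInput{
+//     Bucket: aws.String("my-bucket"),
+//     Key:    aws.String("path/to/object"),
+// }
+//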
+// req, resp := client.HeadObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + output = &HeadObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// HeadObject API operation for Amazon Simple Storage Service. +// +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses +// for more information on returned errors. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadObject for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + return out, req.Send() +} + +// HeadObjectWithContext is the same as HeadObject with the addition of +// the ability to pass a context and additional request options. +// +// See HeadObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" + +// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListBucketAnalyticsConfigurations for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBucketAnalyticsConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. 
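+// // Hedged input sketch (bucket name assumed); ContinuationToken resumes a
+// // truncated listing:
+// params := &s3.ListBucketAnalyticsConfigurationsInput{Bucket: aws.String("my-bucket")}
+//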
+// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketAnalyticsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &ListBucketAnalyticsConfigurationsInput{} + } + + output = &ListBucketAnalyticsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the analytics configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketAnalyticsConfigurations for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketAnalyticsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" + +// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketInventoryConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListBucketInventoryConfigurations for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBucketInventoryConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketInventoryConfigurationsRequest method. 
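+// // Hedged input sketch (bucket name assumed):
+// params := &s3.ListBucketInventoryConfigurationsInput{Bucket: aws.String("my-bucket")}
+//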
+// req, resp := client.ListBucketInventoryConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketInventoryConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &ListBucketInventoryConfigurationsInput{} + } + + output = &ListBucketInventoryConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. +// +// Returns a list of inventory configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketInventoryConfigurations for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketInventoryConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" + +// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketMetricsConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListBucketMetricsConfigurations for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBucketMetricsConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. 
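+// // Hedged input sketch (bucket name assumed):
+// params := &s3.ListBucketMetricsConfigurationsInput{Bucket: aws.String("my-bucket")}
+//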
+// req, resp := client.ListBucketMetricsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketMetricsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &ListBucketMetricsConfigurationsInput{} + } + + output = &ListBucketMetricsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the metrics configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketMetricsConfigurations for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketMetricsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListBuckets for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBuckets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketsRequest method. 
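+// // ListBuckets has no required fields, so an empty input suffices
+// // (illustrative sketch):
+// params := &s3.ListBucketsInput{}
+//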
+// req, resp := client.ListBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + output = &ListBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBuckets API operation for Amazon Simple Storage Service. +// +// Returns a list of all buckets owned by the authenticated sender of the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBuckets for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + return out, req.Send() +} + +// ListBucketsWithContext is the same as ListBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListMultipartUploads for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListMultipartUploads method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListMultipartUploadsRequest method. 
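+// // Hedged input sketch (bucket name assumed); KeyMarker and UploadIdMarker
+// // are the pagination tokens consumed by the Pages helpers:
+// params := &s3.ListMultipartUploadsInput{Bucket: aws.String("my-bucket")}
+//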
+// req, resp := client.ListMultipartUploadsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + output = &ListMultipartUploadsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListMultipartUploads API operation for Amazon Simple Storage Service. +// +// This operation lists in-progress multipart uploads. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListMultipartUploads for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + return out, req.Send() +} + +// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of +// the ability to pass a context and additional request options. +// +// See ListMultipartUploads for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. +// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { + return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMultipartUploadsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMultipartUploadsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListObjectVersions for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjectVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + output = &ListObjectVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectVersions API operation for Amazon Simple Storage Service. +// +// Returns metadata about all of the versions of objects in a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectVersions for usage and error information. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + return out, req.Send() +} + +// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectVersions operation. +// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { + return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListObjects for usage and error information. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectsRequest method. +// req, resp := client.ListObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + output = &ListObjectsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjects API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjects for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + return out, req.Send() +} + +// ListObjectsWithContext is the same as ListObjects with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjects operation. 
+// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { + return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsPagesWithContext same as ListObjectsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListObjectsV2 = "ListObjectsV2" + +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListObjectsV2 for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjectsV2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectsV2Request method. +// req, resp := client.ListObjectsV2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { + op := &request.Operation{ + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListObjectsV2Input{} + } + + output = &ListObjectsV2Output{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectsV2 API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend +// you use this revised API for new application development. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectsV2 for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + return out, req.Send() +} + +// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectsV2 for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectsV2 method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. +// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { + return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsV2Input + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsV2Request(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) + } + return p.Err() +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListParts for usage and error information. 
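Since ListObjectsV2 pages on ContinuationToken/NextContinuationToken as configured above, here is a hedged sketch of collecting keys under a prefix with ListObjectsV2PagesWithContext; the client, bucket, and prefix are placeholder assumptions.

import (
	"context"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// collectKeys gathers the keys under a prefix, one HTTP request per page.
func collectKeys(ctx context.Context, svc *s3.S3) ([]string, error) {
	var keys []string
	input := &s3.ListObjectsV2Input{
		Bucket: aws.String("example-bucket"), // hypothetical
		Prefix: aws.String("logs/"),          // hypothetical
	}
	err := svc.ListObjectsV2PagesWithContext(ctx, input,
		func(page *s3.ListObjectsV2Output, lastPage bool) bool {
			for _, obj := range page.Contents {
				keys = append(keys, *obj.Key)
			}
			return true
		})
	return keys, err
}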
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListParts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPartsRequest method. +// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + output = &ListPartsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListParts API operation for Amazon Simple Storage Service. +// +// Lists the parts that have been uploaded for a specific multipart upload. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListParts for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + return out, req.Send() +} + +// ListPartsWithContext is the same as ListParts with the addition of +// the ability to pass a context and additional request options. +// +// See ListParts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. 
+// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { + return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPartsPagesWithContext same as ListPartsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPartsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPartsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" + +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketAccelerateConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAccelerateConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. +// req, resp := client.PutBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAccelerateConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &PutBucketAccelerateConfigurationInput{} + } + + output = &PutBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// Sets the accelerate configuration of an existing bucket. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAccelerateConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketAcl for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAclRequest method. +// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + output = &PutBucketAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAcl API operation for Amazon Simple Storage Service. +// +// Sets the permissions on a bucket using access control lists (ACL). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
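A short sketch of the ACL call documented above using a canned ACL string; the client and bucket name are assumptions, and PutBucketAclInput also accepts an explicit AccessControlPolicy in place of the shorthand.

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// makeBucketPrivate applies the "private" canned ACL to a bucket.
func makeBucketPrivate(svc *s3.S3) error {
	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		ACL:    aws.String("private"),        // canned ACL shorthand
	})
	return err
}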
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAcl for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + return out, req.Send() +} + +// PutBucketAclWithContext is the same as PutBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" + +// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketAnalyticsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAnalyticsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. +// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAnalyticsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &PutBucketAnalyticsConfigurationInput{} + } + + output = &PutBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAnalyticsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketCors for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + output = &PutBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketCors API operation for Amazon Simple Storage Service. +// +// Sets the cors configuration for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketCors for usage and error information. 
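To make the cors call above concrete, a minimal sketch that allows cross-origin GETs from any origin; the bucket name and rule values are illustrative assumptions only.

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// allowCrossOriginGets installs a single permissive CORS rule.
func allowCrossOriginGets(svc *s3.S3) error {
	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedMethods: aws.StringSlice([]string{"GET"}),
				AllowedOrigins: aws.StringSlice([]string{"*"}),
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	return err
}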
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + return out, req.Send() +} + +// PutBucketCorsWithContext is the same as PutBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" + +// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketInventoryConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketInventoryConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketInventoryConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketInventoryConfigurationRequest method. +// req, resp := client.PutBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketInventoryConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &PutBucketInventoryConfigurationInput{} + } + + output = &PutBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Adds an inventory configuration (identified by the inventory ID) to the +// bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketInventoryConfiguration for usage and error information. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketLifecycle for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLifecycleRequest method. +// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + output = &PutBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deprecated, see the PutBucketLifecycleConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycle for usage and error information. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketLifecycleConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLifecycleConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. +// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + output = &PutBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// Sets lifecycle configuration for your bucket. If a lifecycle configuration +// exists, it replaces it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycleConfiguration for usage and error information. 
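A hedged sketch of the replace-the-whole-configuration semantics described above: one call swaps in a single-rule lifecycle configuration. The rule ID, prefix, and 30-day expiry are illustrative values, not part of this patch.

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// expireOldLogs replaces the bucket's lifecycle config with one expiry rule.
func expireOldLogs(svc *s3.S3) error {
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:         aws.String("expire-logs"), // illustrative
				Prefix:     aws.String("logs/"),
				Status:     aws.String("Enabled"),
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
			}},
		},
	})
	return err
}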
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLogging = "PutBucketLogging" + +// PutBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketLogging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLoggingRequest method. +// req, resp := client.PutBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { + op := &request.Operation{ + Name: opPutBucketLogging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &PutBucketLoggingInput{} + } + + output = &PutBucketLoggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketLogging API operation for Amazon Simple Storage Service. +// +// Sets the logging parameters for a bucket and specifies permissions for who +// can view and modify the logging parameters. To set the logging status of +// a bucket, you must be the bucket owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLogging for usage and error information. 
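A sketch of enabling access logging per the description above; the target bucket and prefix are placeholders, and the target bucket must already grant the S3 log-delivery group write permission (an S3 requirement not shown here).

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// enableAccessLogs turns on server access logging for a bucket.
func enableAccessLogs(svc *s3.S3) error {
	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String("example-bucket"), // hypothetical source bucket
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("example-log-bucket"), // hypothetical
				TargetPrefix: aws.String("access/"),
			},
		},
	})
	return err
}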
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + return out, req.Send() +} + +// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" + +// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketMetricsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketMetricsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketMetricsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketMetricsConfigurationRequest method. +// req, resp := client.PutBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketMetricsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &PutBucketMetricsConfigurationInput{} + } + + output = &PutBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Sets a metrics configuration (specified by the metrics configuration ID) +// for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketMetricsConfiguration for usage and error information. 
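The comments above repeatedly advise runtime type assertions on awserr.Error; a concrete sketch of that pattern, applicable to the error returned by any of these operations:

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// describeFailure shows the documented awserr.Error assertion pattern.
func describeFailure(err error) {
	if aerr, ok := err.(awserr.Error); ok {
		// Code is the stable error identifier, e.g. "NoSuchBucket";
		// Message carries the human-readable detail.
		fmt.Printf("code=%s message=%s\n", aerr.Code(), aerr.Message())
		return
	}
	fmt.Println("non-AWS error:", err)
}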
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotification = "PutBucketNotification" + +// PutBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotification operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketNotification for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketNotification method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketNotificationRequest method. +// req, resp := client.PutBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketNotification, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationInput{} + } + + output = &PutBucketNotificationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketNotification API operation for Amazon Simple Storage Service. +// +// Deprecated, see the PutBucketNotificationConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotification for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" + +// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketNotificationConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketNotificationConfigurationRequest method. +// req, resp := client.PutBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketNotificationConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationConfigurationInput{} + } + + output = &PutBucketNotificationConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Enables notifications of specified events for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
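A minimal sketch of enabling event notifications as described above, wiring object-created events to an SQS queue; the bucket name and queue ARN are hypothetical placeholders.

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// notifyOnCreate sends s3:ObjectCreated:* events to an SQS queue.
func notifyOnCreate(svc *s3.S3) error {
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{{
				Events:   aws.StringSlice([]string{"s3:ObjectCreated:*"}),
				QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example"), // hypothetical
			}},
		},
	})
	return err
}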
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotificationConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketPolicy for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketPolicyRequest method. +// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + output = &PutBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketPolicy API operation for Amazon Simple Storage Service. +// +// Replaces a policy on a bucket. If the bucket already has a policy, the one +// in this request completely replaces it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
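Because the policy document is passed as a raw JSON string, here is a sketch of the replace-the-whole-policy behavior noted above; the bucket and policy contents are illustrative only.

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// requireTLS replaces the bucket policy with a single deny statement.
func requireTLS(svc *s3.S3) error {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Deny",
	    "Principal": "*",
	    "Action": "s3:*",
	    "Resource": "arn:aws:s3:::example-bucket/*",
	    "Condition": {"Bool": {"aws:SecureTransport": "false"}}
	  }]
	}` // illustrative policy; any existing policy is fully replaced
	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Policy: aws.String(policy),
	})
	return err
}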
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketPolicy for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + return out, req.Send() +} + +// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketReplication = "PutBucketReplication" + +// PutBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketReplication for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketReplicationRequest method. +// req, resp := client.PutBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + output = &PutBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketReplication API operation for Amazon Simple Storage Service. +// +// Creates a new replication configuration (or replaces an existing one, if +// present). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketReplication for usage and error information. 
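The "inject custom logic into the request's lifecycle" note above can be made concrete: a sketch that builds the request object, pushes a logging handler onto the Send phase, and only then executes it. The client and input are assumed to be supplied by the caller.

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/s3"
)

// putReplicationWithLogging demonstrates the Request + custom handler flow.
func putReplicationWithLogging(svc *s3.S3, input *s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) {
	req, out := svc.PutBucketReplicationRequest(input)
	// Runs just before the HTTP request is sent.
	req.Handlers.Send.PushFront(func(r *request.Request) {
		log.Println("sending", r.Operation.Name, "to", r.HTTPRequest.URL.Host)
	})
	return out, req.Send()
}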
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + return out, req.Send() +} + +// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketRequestPayment for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketRequestPayment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketRequestPaymentRequest method. +// req, resp := client.PutBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + output = &PutBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. Documentation on requester pays buckets +// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +// +// Returns awserr.Error for service API and SDK errors. 
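+// For example, enabling requester-pays on a bucket might look like this
+// sketch (the bucket name is a placeholder):
+//
+//    _, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
+//        Bucket: aws.String("example-bucket"),
+//        RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
+//            Payer: aws.String(s3.PayerRequester),
+//        },
+//    })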
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketRequestPayment for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketTaggingRequest method. +// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + output = &PutBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketTagging API operation for Amazon Simple Storage Service. +// +// Sets the tags for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketTagging for usage and error information. 
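+// A tagging sketch that also shows the awserr type assertion mentioned above
+// (the bucket name and tags are placeholders):
+//
+//    _, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
+//        Bucket: aws.String("example-bucket"),
+//        Tagging: &s3.Tagging{
+//            TagSet: []*s3.Tag{{Key: aws.String("env"), Value: aws.String("dev")}},
+//        },
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        fmt.Println(aerr.Code(), aerr.Message())
+//    }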
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + return out, req.Send() +} + +// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketVersioning = "PutBucketVersioning" + +// PutBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketVersioning operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketVersioning for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketVersioning method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketVersioningRequest method. +// req, resp := client.PutBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + output = &PutBucketVersioningOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketVersioning API operation for Amazon Simple Storage Service. +// +// Sets the versioning state of an existing bucket. To set the versioning state, +// you must be the bucket owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketVersioning for usage and error information. 
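+// A versioning sketch (the bucket name is a placeholder):
+//
+//    _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//        Bucket: aws.String("example-bucket"),
+//        VersioningConfiguration: &s3.VersioningConfiguration{
+//            Status: aws.String(s3.BucketVersioningStatusEnabled),
+//        },
+//    })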
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + return out, req.Send() +} + +// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketWebsite for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketWebsiteRequest method. +// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + output = &PutBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketWebsite API operation for Amazon Simple Storage Service. +// +// Set the website configuration for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketWebsite for usage and error information. 
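+// A static-website sketch (the bucket and document names are placeholders):
+//
+//    _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+//        Bucket: aws.String("example-bucket"),
+//        WebsiteConfiguration: &s3.WebsiteConfiguration{
+//            IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+//            ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
+//        },
+//    })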
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + return out, req.Send() +} + +// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutObjectRequest method. +// req, resp := client.PutObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + output = &PutObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObject API operation for Amazon Simple Storage Service. +// +// Adds an object to a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObject for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + return out, req.Send() +} + +// PutObjectWithContext is the same as PutObject with the addition of +// the ability to pass a context and additional request options. +// +// See PutObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
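+// A PutObjectWithContext sketch with a deadline, assuming Go 1.7+ where a
+// context.Context satisfies aws.Context (names, body, and timeout are
+// illustrative placeholders):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := svc.PutObjectWithContext(ctx, &s3.PutObjectInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("hello.txt"),
+//        Body:   bytes.NewReader([]byte("hello world")),
+//    })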
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectAcl = "PutObjectAcl" + +// PutObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutObjectAcl for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutObjectAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutObjectAclRequest method. +// req, resp := client.PutObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { + op := &request.Operation{ + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &PutObjectAclInput{} + } + + output = &PutObjectAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObjectAcl API operation for Amazon Simple Storage Service. +// +// uses the acl subresource to set the access control list (ACL) permissions +// for an object that already exists in a bucket +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectAcl for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + return out, req.Send() +} + +// PutObjectAclWithContext is the same as PutObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutObjectTagging = "PutObjectTagging" + +// PutObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutObjectTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutObjectTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutObjectTaggingRequest method. +// req, resp := client.PutObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { + op := &request.Operation{ + Name: opPutObjectTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &PutObjectTaggingInput{} + } + + output = &PutObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObjectTagging API operation for Amazon Simple Storage Service. +// +// Sets the supplied tag-set to an object that already exists in a bucket +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectTagging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + return out, req.Send() +} + +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See RestoreObject for usage and error information. 
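+// For the PutObjectTagging operation defined above, a minimal sketch (the
+// bucket, key, and tags are placeholders):
+//
+//    _, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("hello.txt"),
+//        Tagging: &s3.Tagging{
+//            TagSet: []*s3.Tag{{Key: aws.String("env"), Value: aws.String("dev")}},
+//        },
+//    })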
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreObjectRequest method. +// req, resp := client.RestoreObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + output = &RestoreObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreObject API operation for Amazon Simple Storage Service. +// +// Restores an archived copy of an object back into Amazon S3 +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation RestoreObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" +// This operation is not allowed against this storage tier +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + return out, req.Send() +} + +// RestoreObjectWithContext is the same as RestoreObject with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UploadPart for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadPart method directly +// instead. 
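+// For the RestoreObject operation described above, a minimal sketch that
+// requests a temporary copy for one day (the bucket and key are placeholders):
+//
+//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//        Bucket:         aws.String("example-bucket"),
+//        Key:            aws.String("archived-object"),
+//        RestoreRequest: &s3.RestoreRequest{Days: aws.Int64(1)},
+//    })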
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadPartRequest method. +// req, resp := client.UploadPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartInput{} + } + + output = &UploadPartOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPart API operation for Amazon Simple Storage Service. +// +// Uploads a part in a multipart upload. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPart for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + return out, req.Send() +} + +// UploadPartWithContext is the same as UploadPart with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPart for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPartCopy = "UploadPartCopy" + +// UploadPartCopyRequest generates a "aws/request.Request" representing the +// client's request for the UploadPartCopy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UploadPartCopy for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadPartCopy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadPartCopyRequest method. 
+// req, resp := client.UploadPartCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { + op := &request.Operation{ + Name: opUploadPartCopy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartCopyInput{} + } + + output = &UploadPartCopyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPartCopy API operation for Amazon Simple Storage Service. +// +// Uploads a part by copying data from an existing object as data source. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPartCopy for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + return out, req.Send() +} + +// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPartCopy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Specifies the days since the initiation of an Incomplete Multipart Upload +// that Lifecycle will wait before permanently removing all parts of the upload. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload +type AbortIncompleteMultipartUpload struct { + _ struct{} `type:"structure"` + + // Indicates the number of days that must pass since initiation for Lifecycle + // to abort an Incomplete Multipart Upload. + DaysAfterInitiation *int64 `type:"integer"` +} + +// String returns the string representation +func (s AbortIncompleteMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortIncompleteMultipartUpload) GoString() string { + return s.String() +} + +// SetDaysAfterInitiation sets the DaysAfterInitiation field's value. 
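+// As a sketch, this type is typically embedded in a lifecycle rule such as the
+// following (the rule ID and day count are illustrative; LifecycleRule is
+// defined later in this file):
+//
+//    rule := &s3.LifecycleRule{
+//        ID:     aws.String("abort-stale-uploads"),
+//        Status: aws.String(s3.ExpirationStatusEnabled),
+//        Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")},
+//        AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
+//            DaysAfterInitiation: aws.Int64(7),
+//        },
+//    }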
+func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { + s.DaysAfterInitiation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest +type AbortMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AbortMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. 
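+// A sketch of aborting an in-progress upload with this input type (the bucket,
+// key, and uploadID variable are placeholders; AbortMultipartUpload is defined
+// earlier in this file):
+//
+//    _, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
+//        Bucket:   aws.String("example-bucket"),
+//        Key:      aws.String("big-object"),
+//        UploadId: aws.String(uploadID),
+//    })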
+func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration +type AccelerateConfiguration struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s AccelerateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccelerateConfiguration) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { + s.Status = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} + if s.Grants != nil { + for i, v := range s.Grants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrants sets the Grants field's value. +func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { + s.Owner = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator +type AnalyticsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix to use when evaluating an AND predicate. + Prefix *string `type:"string"` + + // The list of tags to use when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s AnalyticsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. 
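+// For the AccelerateConfiguration type above, a minimal sketch (the bucket
+// name is a placeholder; PutBucketAccelerateConfiguration is defined earlier
+// in this file):
+//
+//    _, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//        AccelerateConfiguration: &s3.AccelerateConfiguration{
+//            Status: aws.String(s3.BucketAccelerateStatusEnabled),
+//        },
+//    })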
+func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { + s.Tags = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration +type AnalyticsConfiguration struct { + _ struct{} `type:"structure"` + + // The filter used to describe a set of objects for analyses. A filter must + // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). + // If no filter is provided, all objects will be considered in any analysis. + Filter *AnalyticsFilter `type:"structure"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // If present, it indicates that data related to access patterns will be collected + // and made available to analyze the tradeoffs between different storage classes. + // + // StorageClassAnalysis is a required field + StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.StorageClassAnalysis == nil { + invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.StorageClassAnalysis != nil { + if err := s.StorageClassAnalysis.Validate(); err != nil { + invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { + s.Id = &v + return s +} + +// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. +func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { + s.StorageClassAnalysis = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination +type AnalyticsExportDestination struct { + _ struct{} `type:"structure"` + + // A destination signifying output to an S3 bucket. + // + // S3BucketDestination is a required field + S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsExportDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsExportDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
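+// An AnalyticsConfiguration sketch exporting a CSV report (the ID, prefix, and
+// destination bucket ARN are placeholders; the referenced types are defined
+// nearby in this file):
+//
+//    cfg := &s3.AnalyticsConfiguration{
+//        Id:     aws.String("report-1"),
+//        Filter: &s3.AnalyticsFilter{Prefix: aws.String("logs/")},
+//        StorageClassAnalysis: &s3.StorageClassAnalysis{
+//            DataExport: &s3.StorageClassAnalysisDataExport{
+//                OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1),
+//                Destination: &s3.AnalyticsExportDestination{
+//                    S3BucketDestination: &s3.AnalyticsS3BucketDestination{
+//                        Bucket: aws.String("arn:aws:s3:::report-bucket"),
+//                        Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
+//                    },
+//                },
+//            },
+//        },
+//    }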
+func (s *AnalyticsExportDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { + s.S3BucketDestination = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter +type AnalyticsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating an + // analytics filter. The operator must have at least two predicates. + And *AnalyticsAndOperator `type:"structure"` + + // The prefix to use when evaluating an analytics filter. + Prefix *string `type:"string"` + + // The tag to use when evaluating an analytics filter. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s AnalyticsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { + s.Tag = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination +type AnalyticsS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The Amazon resource name (ARN) of the bucket to which data is exported. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The account ID that owns the destination bucket. If no account ID is provided, + // the owner will not be validated prior to exporting data. + BucketAccountId *string `type:"string"` + + // The file format used when exporting data to Amazon S3. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` + + // The prefix to use when exporting data. The exported data begins with this + // prefix. 
+ Prefix *string `type:"string"` +} + +// String returns the string representation +func (s AnalyticsS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination { + s.Bucket = &v + return s +} + +// SetBucketAccountId sets the BucketAccountId field's value. +func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { + s.BucketAccountId = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination { + s.Prefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the bucket. + Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *Bucket) SetCreationDate(v time.Time) *Bucket { + s.CreationDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *Bucket) SetName(v string) *Bucket { + s.Name = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // Rules is a required field + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. 
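+// A sketch applying a lifecycle configuration, reusing the rule sketched
+// earlier (PutBucketLifecycleConfiguration is defined earlier in this file;
+// the bucket name is a placeholder):
+//
+//    _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//        LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//            Rules: []*s3.LifecycleRule{rule},
+//        },
+//    })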
+func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { + s.LoggingEnabled = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + // CORSRules is a required field + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCORSRules sets the CORSRules field's value. +func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { + s.CORSRules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule +type CORSRule struct { + _ struct{} `type:"structure"` + + // Specifies which headers are allowed in a pre-flight OPTIONS request. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // Identifies HTTP methods that the domain/origin specified in the rule is allowed + // to execute. + // + // AllowedMethods is a required field + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + // + // AllowedOrigins is a required field + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). 
+ ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSRule"} + if s.AllowedMethods == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) + } + if s.AllowedOrigins == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowedHeaders sets the AllowedHeaders field's value. +func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { + s.AllowedHeaders = v + return s +} + +// SetAllowedMethods sets the AllowedMethods field's value. +func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { + s.AllowedMethods = v + return s +} + +// SetAllowedOrigins sets the AllowedOrigins field's value. +func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { + s.AllowedOrigins = v + return s +} + +// SetExposeHeaders sets the ExposeHeaders field's value. +func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { + s.ExposeHeaders = v + return s +} + +// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. +func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { + s.MaxAgeSeconds = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + CloudFunction *string `type:"string"` + + // Bucket event for which to send notifications. + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + InvocationRole *string `type:"string"` +} + +// String returns the string representation +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +// SetCloudFunction sets the CloudFunction field's value. +func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { + s.CloudFunction = &v + return s +} + +// SetEvent sets the Event field's value. +func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { + s.Id = &v + return s +} + +// SetInvocationRole sets the InvocationRole field's value. 
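+// For the CORSRule type above, a minimal sketch allowing browser GETs from any
+// origin (the bucket name and values are placeholders; PutBucketCors is
+// defined earlier in this file):
+//
+//    _, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
+//        Bucket: aws.String("example-bucket"),
+//        CORSConfiguration: &s3.CORSConfiguration{
+//            CORSRules: []*s3.CORSRule{{
+//                AllowedMethods: []*string{aws.String("GET")},
+//                AllowedOrigins: []*string{aws.String("*")},
+//                MaxAgeSeconds:  aws.Int64(3000),
+//            }},
+//        },
+//    })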
+func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { + s.InvocationRole = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix +type CommonPrefix struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +// SetPrefix sets the Prefix field's value. +func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { + s.Prefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest +type CompleteMultipartUploadInput struct { + _ struct{} `type:"structure" payload:"MultipartUpload"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { + s.Key = &v + return s +} + +// SetMultipartUpload sets the MultipartUpload field's value. +func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { + s.MultipartUpload = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. 
+func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + Bucket *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + Key *string `min:"1" type:"string"` + + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { + s.Bucket = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { + s.Expiration = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { + s.Key = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { + s.Location = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload
+type CompletedMultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s CompletedMultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompletedMultipartUpload) GoString() string {
+ return s.String()
+}
+
+// SetParts sets the Parts field's value.
+func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload {
+ s.Parts = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart
+type CompletedPart struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string `type:"string"`
+
+ // Part number that identifies the part. This is a positive integer between
+ // 1 and 10,000.
+ PartNumber *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s CompletedPart) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompletedPart) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *CompletedPart) SetETag(v string) *CompletedPart {
+ s.ETag = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
+ s.PartNumber = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition
+type Condition struct {
+ _ struct{} `type:"structure"`
+
+ // The HTTP error code when the redirect is applied. In the event of an error,
+ // if the error code equals this value, then the specified redirect is applied.
+ // Required when parent element Condition is specified and sibling KeyPrefixEquals
+ // is not specified. If both are specified, then both must be true for the redirect
+ // to be applied.
+ HttpErrorCodeReturnedEquals *string `type:"string"`
+
+ // The object key name prefix when the redirect is applied. For example, to
+ // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+ // To redirect requests for all pages with the prefix docs/, the key prefix will
+ // be docs/, which identifies all objects in the docs/ folder. Required when
+ // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+ // is not specified. If both conditions are specified, both must be true for
+ // the redirect to be applied.
+ KeyPrefixEquals *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Condition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Condition) GoString() string {
+ return s.String()
+}
+
+// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
+func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
+ s.HttpErrorCodeReturnedEquals = &v
+ return s
+}
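+
+// Editor's note: the sketch below is illustrative only, not generated code. It
+// assembles the CompletedPart list that CompleteMultipartUpload expects, using
+// the types defined above; the bucket, key, upload ID, and ETag literals are
+// hypothetical placeholders.
+func exampleCompleteMultipartUpload() (*CompleteMultipartUploadInput, error) {
+ // Each part carries the ETag returned by the corresponding UploadPart
+ // call and a PartNumber between 1 and 10,000.
+ parts := []*CompletedPart{
+ (&CompletedPart{}).SetETag(`"etag-of-part-1"`).SetPartNumber(1),
+ (&CompletedPart{}).SetETag(`"etag-of-part-2"`).SetPartNumber(2),
+ }
+
+ in := (&CompleteMultipartUploadInput{}).
+ SetBucket("example-bucket").
+ SetKey("example/object").
+ SetUploadId("example-upload-id").
+ SetMultipartUpload((&CompletedMultipartUpload{}).SetParts(parts))
+
+ // Validate enforces the required Bucket, Key, and UploadId fields.
+ if err := in.Validate(); err != nil {
+ return nil, err
+ }
+ return in, nil
+}
+
+// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.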
+func (s *Condition) SetKeyPrefixEquals(v string) *Condition { + s.KeyPrefixEquals = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest +type CopyObjectInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The date and time at which the object is no longer cacheable. 
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether the metadata is copied from the source object or replaced
+ // with metadata provided in the request.
+ MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the destination object; this value must be used in conjunction
+ // with the TaggingDirective. The tag-set must be encoded as URL query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // Specifies whether the object tag-set is copied from the source object or
+ // replaced with the tag-set provided in the request.
+ TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CopyObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CopyObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.CopySource == nil {
+ invalidParams.Add(request.NewErrParamRequired("CopySource"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput {
+ s.ContentType = &v
+ return s
+}
+
+// SetCopySource sets the CopySource field's value.
+func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput {
+ s.CopySource = &v
+ return s
+}
+
+// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
+func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput {
+ s.CopySourceIfMatch = &v
+ return s
+}
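+
+// Editor's note: the sketch below is illustrative only, not generated code.
+// Per the CopySource field comment above, the source bucket and key are
+// joined with a slash and must be URL-encoded before being set. All names
+// below are hypothetical placeholders.
+func exampleCopyObject() (*CopyObjectInput, error) {
+ in := (&CopyObjectInput{}).
+ SetBucket("destination-bucket").
+ SetKey("copied/object").
+ SetCopySource("source-bucket/source/object").
+ SetMetadataDirective("COPY") // keep the source object's metadata
+
+ // Validate enforces Bucket, CopySource, and Key; it does not check that
+ // CopySource itself is well-formed or URL-encoded.
+ if err := in.Validate(); err != nil {
+ return nil, err
+ }
+ return in, nil
+}
+
+// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.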
+func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { + s.CopySourceSSECustomerKey = &v + return s +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { + s.Metadata = v + return s +} + +// SetMetadataDirective sets the MetadataDirective field's value. +func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { + s.MetadataDirective = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. 
+func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { + s.Tagging = &v + return s +} + +// SetTaggingDirective sets the TaggingDirective field's value. +func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { + s.TaggingDirective = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + CopyObjectResult *CopyObjectResult `type:"structure"` + + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +// SetCopyObjectResult sets the CopyObjectResult field's value. +func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { + s.CopyObjectResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { + s.CopySourceVersionId = &v + return s +} + +// SetExpiration sets the Expiration field's value. 
+func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectResult) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { + s.LastModified = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyPartResult) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CopyPartResult) SetETag(v string) *CopyPartResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { + s.LastModified = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the region where the bucket will be created. If you don't specify + // a region, the bucket will be created in US Standard. 
+ LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+}
+
+// String returns the string representation
+func (s CreateBucketConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetLocationConstraint sets the LocationConstraint field's value.
+func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration {
+ s.LocationConstraint = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest
+type CreateBucketInput struct {
+ _ struct{} `type:"structure" payload:"CreateBucketConfiguration"`
+
+ // The canned ACL to apply to the bucket.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value.
+func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput {
+ s.CreateBucketConfiguration = v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput {
+ s.GrantRead = &v
+ return s
+}
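+
+// Editor's note: the sketch below is illustrative only, not generated code. It
+// shows the nested CreateBucketConfiguration payload: per the comment on
+// LocationConstraint above, omitting it creates the bucket in US Standard
+// (the original name for the us-east-1 region), so any other region must be
+// named explicitly. The bucket name and region are hypothetical.
+func exampleCreateBucket() (*CreateBucketInput, error) {
+ in := (&CreateBucketInput{}).
+ SetBucket("example-bucket").
+ SetACL("private").
+ SetCreateBucketConfiguration(
+ (&CreateBucketConfiguration{}).SetLocationConstraint("eu-west-1"))
+
+ // Only Bucket is required; the configuration payload is optional.
+ if err := in.Validate(); err != nil {
+ return nil, err
+ }
+ return in, nil
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.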
+func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { + s.GrantWriteACP = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +// SetLocation sets the Location field's value. +func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { + s.Location = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest +type CreateMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. 
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadInput) GoString() string {
+ return s.String()
+}
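+
+// Editor's note: the sketch below is illustrative only, not generated code. It
+// builds a CreateMultipartUploadInput requesting SSE-KMS; per the SSEKMSKeyId
+// comment above, subsequent requests against the protected object must use
+// SSL and SigV4. The bucket, key, and KMS key ID are hypothetical placeholders.
+func exampleCreateMultipartUpload() (*CreateMultipartUploadInput, error) {
+ in := (&CreateMultipartUploadInput{}).
+ SetBucket("example-bucket").
+ SetKey("large/object").
+ SetServerSideEncryption("aws:kms").
+ SetSSEKMSKeyId("arn:aws:kms:us-east-1:111122223333:key/example-key-id")
+
+ // Bucket and Key are the only required fields.
+ if err := in.Validate(); err != nil {
+ return nil, err
+ }
+ return in, nil
+}
+
+// Validate inspects the fields of the type to determine if they are valid.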
+func (s *CreateMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { + s.Bucket = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { + s.ContentType = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { + s.Metadata = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. 
+func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput { + s.StorageClass = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { + s.WebsiteRedirectLocation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput +type CreateMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // Date when multipart upload will become eligible for abort operation by lifecycle. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `locationName:"Bucket" type:"string"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. 
+ UploadId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadOutput) GoString() string {
+ return s.String()
+}
+
+// SetAbortDate sets the AbortDate field's value.
+func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput {
+ s.AbortDate = &v
+ return s
+}
+
+// SetAbortRuleId sets the AbortRuleId field's value.
+func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput {
+ s.AbortRuleId = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput {
+ s.UploadId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete
+type Delete struct {
+ _ struct{} `type:"structure"`
+
+ // Objects is a required field
+ Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"`
+
+ // Element to enable quiet mode for the request. When you add this element,
+ // you must set its value to true.
+ Quiet *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s Delete) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Delete) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Delete) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Delete"}
+ if s.Objects == nil {
+ invalidParams.Add(request.NewErrParamRequired("Objects"))
+ }
+ if s.Objects != nil {
+ for i, v := range s.Objects {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
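+
+// Editor's note: the sketch below is illustrative only, not generated code. It
+// builds the batch payload for a multi-object delete using the Delete type
+// above and ObjectIdentifier (defined later in this file); with Quiet set,
+// only failed deletions are reported back. The keys are hypothetical.
+func exampleBatchDelete() (*Delete, error) {
+ del := (&Delete{}).
+ SetObjects([]*ObjectIdentifier{
+ (&ObjectIdentifier{}).SetKey("logs/2017/10/30.log"),
+ (&ObjectIdentifier{}).SetKey("logs/2017/10/31.log"),
+ }).
+ SetQuiet(true)
+
+ // Validate recurses into each ObjectIdentifier, so a missing Key surfaces
+ // as a nested "Objects[i]" parameter error.
+ if err := del.Validate(); err != nil {
+ return nil, err
+ }
+ return del, nil
+}
+
+// SetObjects sets the Objects field's value.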
+func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { + s.Objects = v + return s +} + +// SetQuiet sets the Quiet field's value. +func (s *Delete) SetQuiet(v bool) *Delete { + s.Quiet = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest +type DeleteBucketAnalyticsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which an analytics configuration is deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput +type DeleteBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest +type DeleteBucketCorsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest +type DeleteBucketInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest +type DeleteBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the inventory configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. 
+func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput +type DeleteBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest +type DeleteBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest +type DeleteBucketMetricsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the metrics configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
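+//
+// The SDK's request handlers run Validate automatically before a request is
+// sent; it can also be called directly. As an illustrative sketch, validating
+// an empty input aggregates both required-parameter failures into a single
+// request.ErrInvalidParams error:
+//
+//	err := (&s3.DeleteBucketMetricsConfigurationInput{}).Validate()
+//	// err reports the missing Bucket and Id parameters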
+func (s *DeleteBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput +type DeleteBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest +type DeleteBucketPolicyInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
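+//
+// A minimal end-to-end sketch, assuming default credentials are available and
+// eliding error handling (the bucket name is a placeholder):
+//
+//	svc := s3.New(session.Must(session.NewSession()))
+//	_, err := svc.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
+//		Bucket: aws.String("example-bucket"),
+//	})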
+func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest +type DeleteBucketReplicationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest +type DeleteBucketTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest +type DeleteBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput +type DeleteBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry +type DeleteMarkerEntry struct { + _ struct{} `type:"structure"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteMarkerEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMarkerEntry) GoString() string { + return s.String() +} + +// SetIsLatest sets the IsLatest field's value. +func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. 
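+//
+// DeleteMarkerEntry values are normally read rather than built: they appear
+// in ListObjectVersions responses. An illustrative sketch, assuming an
+// existing *s3.ListObjectVersionsOutput named out:
+//
+//	for _, m := range out.DeleteMarkers {
+//		fmt.Println(aws.StringValue(m.Key), aws.BoolValue(m.IsLatest))
+//	}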
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry { + s.Owner = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest +type DeleteObjectInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { + s.Key = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. 
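+ //
+ // For illustration only, on requester-pays buckets the value can be compared
+ // against the generated enum constant (out is an assumed response value):
+ //
+ //	charged := aws.StringValue(out.RequestCharged) == s3.RequestChargedRequester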
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { + s.RequestCharged = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest +type DeleteObjectTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The versionId of the object that the tag-set will be removed from. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput +type DeleteObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was removed from. 
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest +type DeleteObjectsInput struct { + _ struct{} `type:"structure" payload:"Delete"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Delete is a required field + Delete *Delete `locationName:"Delete" type:"structure" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s DeleteObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Delete == nil { + invalidParams.Add(request.NewErrParamRequired("Delete")) + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { + s.Bucket = &v + return s +} + +// SetDelete sets the Delete field's value. +func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { + s.Delete = v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { + s.RequestPayer = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + Deleted []*DeletedObject `type:"list" flattened:"true"` + + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +// SetDeleted sets the Deleted field's value. +func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { + s.Deleted = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { + s.Errors = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject +type DeletedObject struct { + _ struct{} `type:"structure"` + + DeleteMarker *bool `type:"boolean"` + + DeleteMarkerVersionId *string `type:"string"` + + Key *string `min:"1" type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeletedObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletedObject) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject { + s.DeleteMarker = &v + return s +} + +// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value. +func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject { + s.DeleteMarkerVersionId = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeletedObject) SetKey(v string) *DeletedObject { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeletedObject) SetVersionId(v string) *DeletedObject { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination +type Destination struct { + _ struct{} `type:"structure"` + + // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store + // replicas of the object identified by the rule. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Destination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Destination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *Destination) SetBucket(v string) *Destination { + s.Bucket = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. 
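+//
+// An illustrative replication-destination sketch; the bucket ARN is a
+// placeholder and the storage class uses a generated enum constant:
+//
+//	dst := (&s3.Destination{}).
+//		SetBucket("arn:aws:s3:::example-destination-bucket").
+//		SetStorageClass(s3.StorageClassStandardIa)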
+func (s *Destination) SetStorageClass(v string) *Destination { + s.StorageClass = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error +type Error struct { + _ struct{} `type:"structure"` + + Code *string `type:"string"` + + Key *string `min:"1" type:"string"` + + Message *string `type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Error) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *Error) SetCode(v string) *Error { + s.Code = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Error) SetKey(v string) *Error { + s.Key = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *Error) SetMessage(v string) *Error { + s.Message = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *Error) SetVersionId(v string) *Error { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument +type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ErrorDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ErrorDocument) SetKey(v string) *ErrorDocument { + s.Key = &v + return s +} + +// Container for key value pair that defines the criteria for the filter rule. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule +type FilterRule struct { + _ struct{} `type:"structure"` + + // Object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. Maximum prefix length can be up to 1,024 characters. + // Overlapping prefixes and suffixes are not supported. For more information, + // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + Name *string `type:"string" enum:"FilterRuleName"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterRule) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *FilterRule) SetName(v string) *FilterRule { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. 
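+//
+// An illustrative sketch of a notification filter rule matching keys under a
+// prefix (the prefix value is a placeholder):
+//
+//	rule := (&s3.FilterRule{}).
+//		SetName(s3.FilterRuleNamePrefix).
+//		SetValue("logs/")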
+func (s *FilterRule) SetValue(v string) *FilterRule { + s.Value = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest +type GetBucketAccelerateConfigurationInput struct { + _ struct{} `type:"structure"` + + // Name of the bucket for which the accelerate configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput +type GetBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { + s.Status = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest +type GetBucketAclInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. 
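+ //
+ // An illustrative read-side sketch, assuming a GetBucketAcl response named
+ // out:
+ //
+ //	for _, g := range out.Grants {
+ //		fmt.Println(aws.StringValue(g.Permission), g.Grantee)
+ //	}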
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { + s.Owner = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest +type GetBucketAnalyticsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which an analytics configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput +type GetBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. 
+func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput { + s.AnalyticsConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest +type GetBucketCorsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +// SetCORSRules sets the CORSRules field's value. +func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { + s.CORSRules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest +type GetBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the inventory configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. 
+func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput +type GetBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // Specifies the inventory configuration. + InventoryConfiguration *InventoryConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput { + s.InventoryConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. 
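+//
+// Note that this newer API returns []*LifecycleRule, while the older
+// GetBucketLifecycle below returns []*Rule. An illustrative read-side sketch,
+// assuming a response named out:
+//
+//	for _, r := range out.Rules {
+//		fmt.Println(aws.StringValue(r.ID), aws.StringValue(r.Status))
+//	}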
+func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest +type GetBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest +type GetBucketLocationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. 
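+//
+// An empty LocationConstraint denotes the us-east-1 region. As a hedged
+// sketch, the NormalizeBucketLocation helper in this package's
+// bucket_location.go maps the raw value to a usable region name (out is an
+// assumed response value):
+//
+//	region := s3.NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))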
+func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { + s.LocationConstraint = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest +type GetBucketLoggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { + s.LoggingEnabled = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest +type GetBucketMetricsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the metrics configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. 
+func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput +type GetBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` + + // Specifies the metrics configuration. + MetricsConfiguration *MetricsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput { + s.MetricsConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest +type GetBucketNotificationConfigurationRequest struct { + _ struct{} `type:"structure"` + + // Name of the bucket to get the notification configuration for. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketNotificationConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketNotificationConfigurationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketNotificationConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest +type GetBucketPolicyInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
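+//
+// A minimal retrieval sketch, assuming an *s3.S3 client named svc (the bucket
+// name is a placeholder; the returned Policy is the raw JSON document):
+//
+//	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.Policy))
+//	}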
+func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document. + Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { + s.Policy = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest +type GetBucketReplicationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { + s.ReplicationConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest +type GetBucketRequestPaymentInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +// SetPayer sets the Payer field's value. +func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { + s.Payer = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest +type GetBucketTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. 
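+//
+// An illustrative read-side sketch, assuming a response named out:
+//
+//	for _, t := range out.TagSet {
+//		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
+//	}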
+func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput {
+ s.TagSet = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest
+type GetBucketVersioningInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketVersioningInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketVersioningInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketVersioningInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput
+type GetBucketVersioningOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA
+ // delete. If the bucket has never been so configured, this element is not returned.
+ MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"`
+
+ // The versioning state of the bucket.
+ Status *string `type:"string" enum:"BucketVersioningStatus"`
+}
+
+// String returns the string representation
+func (s GetBucketVersioningOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketVersioningOutput) GoString() string {
+ return s.String()
+}
+
+// SetMFADelete sets the MFADelete field's value.
+func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput {
+ s.MFADelete = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput {
+ s.Status = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest
+type GetBucketWebsiteInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketWebsiteInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketWebsiteInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketWebsiteInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
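// A minimal usage sketch (not part of the generated file; the bucket name is
// hypothetical and the "fmt", aws, session, and s3 imports are assumed):
// reading a bucket's versioning state with GetBucketVersioning.
func exampleGetBucketVersioning() error {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
		Bucket: aws.String("my-bucket"), // hypothetical bucket
	})
	if err != nil {
		return err
	}
	// Both output fields are optional pointers, so read them with the
	// nil-safe aws helpers. Status is empty until versioning has been
	// configured; MFADelete appears only on buckets configured with MFA delete.
	fmt.Printf("status=%q mfaDelete=%q\n",
		aws.StringValue(out.Status), aws.StringValue(out.MFADelete))
	return nil
}

+// SetBucket sets the Bucket field's value.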
+func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. +func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { + s.RoutingRules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest +type GetObjectAclInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { + s.Owner = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest +type GetObjectInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of the object being read. This is a positive integer between + // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. + // Useful for downloading just a part of an object. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` + + // Downloads the specified range bytes of an object. 
+ // For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+ // Sets the Content-Disposition header of the response.
+ ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectInput) GoString() string {
+ return s.String()
+}
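// A minimal usage sketch (not part of the generated file; bucket and key are
// hypothetical and the "io/ioutil", aws, session, and s3 imports are assumed):
// a ranged GET built from GetObjectInput. The Range header asks S3 for the
// first KiB only, so at most 1024 bytes are transferred.
func exampleRangedGet() ([]byte, error) {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("my-bucket"),    // hypothetical bucket
		Key:    aws.String("logs/app.log"), // hypothetical key
		Range:  aws.String("bytes=0-1023"), // HTTP Range syntax, inclusive
	})
	if err != nil {
		return nil, err
	}
	defer out.Body.Close() // the caller owns Body and must close it
	return ioutil.ReadAll(out.Body)
}

+// Validate inspects the fields of the type to determine if they are valid.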
+func (s *GetObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { + s.Bucket = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectInput) SetKey(v string) *GetObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *GetObjectInput) SetRange(v string) *GetObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { + s.RequestPayer = &v + return s +} + +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { + s.ResponseExpires = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
+func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // The count of parts this object has. 
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The number of tags, if any, on the object. + TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetBody sets the Body field's value. +func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { + s.Body = v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. 
+func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. +func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { + s.MissingMeta = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. 
+func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest +type GetObjectTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput +type GetObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { + s.TagSet = v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest +type GetObjectTorrentInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTorrentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { + s.RequestPayer = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +// SetBody sets the Body field's value. +func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { + s.Body = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters +type GlacierJobParameters struct { + _ struct{} `type:"structure"` + + // Glacier retrieval tier at which the restore will be processed. 
+ // + // Tier is a required field + Tier *string `type:"string" required:"true" enum:"Tier"` +} + +// String returns the string representation +func (s GlacierJobParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlacierJobParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlacierJobParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} + if s.Tier == nil { + invalidParams.Add(request.NewErrParamRequired("Tier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTier sets the Tier field's value. +func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { + s.Tier = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant +type Grant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *Grant) SetGrantee(v *Grantee) *Grant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *Grant) SetPermission(v string) *Grant { + s.Permission = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + // + // Type is a required field + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grantee) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Grantee) SetDisplayName(v string) *Grantee { + s.DisplayName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. 
+func (s *Grantee) SetEmailAddress(v string) *Grantee {
+ s.EmailAddress = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Grantee) SetID(v string) *Grantee {
+ s.ID = &v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *Grantee) SetType(v string) *Grantee {
+ s.Type = &v
+ return s
+}
+
+// SetURI sets the URI field's value.
+func (s *Grantee) SetURI(v string) *Grantee {
+ s.URI = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest
+type HeadBucketInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s HeadBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput
+type HeadBucketOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s HeadBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadBucketOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest
+type HeadObjectInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified,
+ // otherwise return a 412 (precondition failed).
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified.
+ // Useful for querying the size of the part and the number of parts in this
+ // object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // Downloads the specified range bytes of an object. For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetIfMatch sets the IfMatch field's value.
+func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput {
+ s.IfMatch = &v
+ return s
+}
+
+// SetIfModifiedSince sets the IfModifiedSince field's value.
+func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput {
+ s.IfModifiedSince = &v
+ return s
+}
+
+// SetIfNoneMatch sets the IfNoneMatch field's value.
+func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput {
+ s.IfNoneMatch = &v
+ return s
+}
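// A minimal usage sketch (not part of the generated file; bucket and key are
// hypothetical and the "fmt", aws, session, and s3 imports are assumed):
// HeadObject fetches headers only, so it is a cheap way to inspect an object's
// size and ETag before deciding how to download it.
func exampleHeadObject() error {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("my-bucket"),    // hypothetical bucket
		Key:    aws.String("logs/app.log"), // hypothetical key
	})
	if err != nil {
		return err
	}
	// No body is transferred; metadata arrives entirely in response headers.
	fmt.Printf("size=%d etag=%s\n",
		aws.Int64Value(out.ContentLength), aws.StringValue(out.ETag))
	return nil
}

+// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.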
+func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput +type HeadObjectOutput struct { + _ struct{} `type:"structure"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. 
+ Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // The count of parts this object has. + PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. 
+func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { + s.MissingMeta = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
+func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument +type IndexDocument struct { + _ struct{} `type:"structure"` + + // A suffix that is appended to a request that is for a directory on the website + // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ + // the data that is returned will be for the object with the key name images/index.html) + // The suffix must not be empty and must not include a slash character. + // + // Suffix is a required field + Suffix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IndexDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} + if s.Suffix == nil { + invalidParams.Add(request.NewErrParamRequired("Suffix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSuffix sets the Suffix field's value. +func (s *IndexDocument) SetSuffix(v string) *IndexDocument { + s.Suffix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an AWS account, it provides the Canonical User ID. If + // the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Initiator) SetDisplayName(v string) *Initiator { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Initiator) SetID(v string) *Initiator { + s.ID = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration +type InventoryConfiguration struct { + _ struct{} `type:"structure"` + + // Contains information about where to publish the inventory results. + // + // Destination is a required field + Destination *InventoryDestination `type:"structure" required:"true"` + + // Specifies an inventory filter. The inventory only includes objects that meet + // the filter's criteria. 
+	Filter *InventoryFilter `type:"structure"`
+
+	// The ID used to identify the inventory configuration.
+	//
+	// Id is a required field
+	Id *string `type:"string" required:"true"`
+
+	// Specifies which object version(s) to include in the inventory results.
+	//
+	// IncludedObjectVersions is a required field
+	IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"`
+
+	// Specifies whether the inventory is enabled or disabled.
+	//
+	// IsEnabled is a required field
+	IsEnabled *bool `type:"boolean" required:"true"`
+
+	// Contains the optional fields that are included in the inventory results.
+	OptionalFields []*string `locationNameList:"Field" type:"list"`
+
+	// Specifies the schedule for generating inventory results.
+	//
+	// Schedule is a required field
+	Schedule *InventorySchedule `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"}
+	if s.Destination == nil {
+		invalidParams.Add(request.NewErrParamRequired("Destination"))
+	}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+	if s.IncludedObjectVersions == nil {
+		invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions"))
+	}
+	if s.IsEnabled == nil {
+		invalidParams.Add(request.NewErrParamRequired("IsEnabled"))
+	}
+	if s.Schedule == nil {
+		invalidParams.Add(request.NewErrParamRequired("Schedule"))
+	}
+	if s.Destination != nil {
+		if err := s.Destination.Validate(); err != nil {
+			invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Filter != nil {
+		if err := s.Filter.Validate(); err != nil {
+			invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Schedule != nil {
+		if err := s.Schedule.Validate(); err != nil {
+			invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDestination sets the Destination field's value.
+func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration {
+	s.Destination = v
+	return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration {
+	s.Filter = v
+	return s
+}
+
+// SetId sets the Id field's value.
+func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration {
+	s.Id = &v
+	return s
+}
+
+// SetIncludedObjectVersions sets the IncludedObjectVersions field's value.
+func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration {
+	s.IncludedObjectVersions = &v
+	return s
+}
+
+// SetIsEnabled sets the IsEnabled field's value.
+func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration {
+	s.IsEnabled = &v
+	return s
+}
+
+// SetOptionalFields sets the OptionalFields field's value.
+func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration {
+	s.OptionalFields = v
+	return s
+}
+
+// SetSchedule sets the Schedule field's value.
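+
+// Example (sketch, not part of the generated API): a minimal daily CSV
+// inventory configuration as a caller would assemble it. The bucket names,
+// ARN, and svc (an *s3.S3 client) are placeholders.
+//
+//	cfg := &s3.InventoryConfiguration{
+//		Id:                     aws.String("report-1"),
+//		IsEnabled:              aws.Bool(true),
+//		IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
+//		Schedule:               &s3.InventorySchedule{Frequency: aws.String(s3.InventoryFrequencyDaily)},
+//		Destination: &s3.InventoryDestination{
+//			S3BucketDestination: &s3.InventoryS3BucketDestination{
+//				Bucket: aws.String("arn:aws:s3:::examplebucket"),
+//				Format: aws.String(s3.InventoryFormatCsv),
+//			},
+//		},
+//	}
+//	_, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
+//		Bucket:                 aws.String("examplebucket"),
+//		Id:                     cfg.Id,
+//		InventoryConfiguration: cfg,
+//	})
+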
+func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration { + s.Schedule = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination +type InventoryDestination struct { + _ struct{} `type:"structure"` + + // Contains the bucket name, file format, bucket owner (optional), and prefix + // (optional) where inventory results are published. + // + // S3BucketDestination is a required field + S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s InventoryDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination { + s.S3BucketDestination = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter +type InventoryFilter struct { + _ struct{} `type:"structure"` + + // The prefix that an object must have to be included in the inventory results. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s InventoryFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { + s.Prefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination +type InventoryS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The ID of the account that owns the destination bucket. + AccountId *string `type:"string"` + + // The Amazon resource name (ARN) of the bucket where inventory results will + // be published. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // Specifies the output format of the inventory results. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"InventoryFormat"` + + // The prefix that is prepended to all inventory results. 
+ Prefix *string `type:"string"` +} + +// String returns the string representation +func (s InventoryS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { + s.Bucket = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { + s.Prefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule +type InventorySchedule struct { + _ struct{} `type:"structure"` + + // Specifies how frequently inventory results are produced. + // + // Frequency is a required field + Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` +} + +// String returns the string representation +func (s InventorySchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventorySchedule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventorySchedule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"} + if s.Frequency == nil { + invalidParams.Add(request.NewErrParamRequired("Frequency")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFrequency sets the Frequency field's value. +func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { + s.Frequency = &v + return s +} + +// Container for object key name prefix and suffix filtering rules. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for key value pair that defines the criteria for the + // filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyFilter) GoString() string { + return s.String() +} + +// SetFilterRules sets the FilterRules field's value. +func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { + s.FilterRules = v + return s +} + +// Container for specifying the AWS Lambda notification configuration. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. + // + // LambdaFunctionArn is a required field + LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaFunctionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.LambdaFunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration { + s.Id = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration { + s.LambdaFunctionArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // Rules is a required field + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s LifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
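+
+// Example (sketch, not part of the generated API): registering a Lambda
+// function for object-created events. The bucket name, function ARN, and svc
+// (an *s3.S3 client) are placeholders.
+//
+//	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//		Bucket: aws.String("examplebucket"),
+//		NotificationConfiguration: &s3.NotificationConfiguration{
+//			LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{{
+//				Events:            []*string{aws.String(s3.EventS3ObjectCreated)},
+//				LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:example"),
+//			}},
+//		},
+//	})
+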
+func (s *LifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration +type LifecycleExpiration struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent + // versions. If set to true, the delete marker will be expired; if set to false + // the policy takes no action. This cannot be specified with Days or Date in + // a Lifecycle Expiration Policy. + ExpiredObjectDeleteMarker *bool `type:"boolean"` +} + +// String returns the string representation +func (s LifecycleExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleExpiration) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { + s.Days = &v + return s +} + +// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value. +func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration { + s.ExpiredObjectDeleteMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule +type LifecycleRule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // The Filter is used to identify objects that a Lifecycle Rule applies to. + // A Filter must have exactly one of Prefix, Tag, or And specified. + Filter *LifecycleRuleFilter `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. 
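+	//
+	// Example (sketch, not part of the generated API): a rule expiring
+	// noncurrent versions 30 days after they become noncurrent; it would be
+	// sent via PutBucketLifecycleConfiguration. The ID and prefix are
+	// placeholders.
+	//
+	//	rule := &s3.LifecycleRule{
+	//		ID:     aws.String("expire-old-versions"),
+	//		Status: aws.String(s3.ExpirationStatusEnabled),
+	//		Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
+	//		NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
+	//			NoncurrentDays: aws.Int64(30),
+	//		},
+	//	}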
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. This is + // deprecated; use Filter instead. + Prefix *string `deprecated:"true" type:"string"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { + s.Expiration = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *LifecycleRule) SetID(v string) *LifecycleRule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value. +func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule { + s.NoncurrentVersionTransitions = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { + s.Status = &v + return s +} + +// SetTransitions sets the Transitions field's value. +func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { + s.Transitions = v + return s +} + +// This is used in a Lifecycle Rule Filter to apply a logical AND to two or +// more predicates. The Lifecycle Rule will apply to any object matching all +// of the predicates configured inside the And operator. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator +type LifecycleRuleAndOperator struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the rule + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { + s.Tags = v + return s +} + +// The Filter is used to identify objects that a Lifecycle Rule applies to. +// A Filter must have exactly one of Prefix, Tag, or And specified. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter +type LifecycleRuleFilter struct { + _ struct{} `type:"structure"` + + // This is used in a Lifecycle Rule Filter to apply a logical AND to two or + // more predicates. The Lifecycle Rule will apply to any object matching all + // of the predicates configured inside the And operator. + And *LifecycleRuleAndOperator `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string"` + + // This tag must exist in the object's tag set in order for the rule to apply. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s LifecycleRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. 
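+
+// Example (sketch, not part of the generated API): a filter matching objects
+// that carry a given tag under a key prefix, combined with the And operator.
+// The prefix and tag values are placeholders.
+//
+//	filter := &s3.LifecycleRuleFilter{
+//		And: &s3.LifecycleRuleAndOperator{
+//			Prefix: aws.String("archive/"),
+//			Tags:   []*s3.Tag{{Key: aws.String("tier"), Value: aws.String("cold")}},
+//		},
+//	}
+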
+func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { + s.Tag = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest +type ListBucketAnalyticsConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which analytics configurations are retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { + s.Bucket = &v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput +type ListBucketAnalyticsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The list of analytics configurations for a bucket. + AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` + + // The ContinuationToken that represents where this request began. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. The next request must include + // this NextContinuationToken. The token is obfuscated and is not a usable value. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { + s.AnalyticsConfigurationList = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. 
+func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest +type ListBucketInventoryConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the inventory configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker used to continue an inventory configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketInventoryConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketInventoryConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketInventoryConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { + s.Bucket = &v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput +type ListBucketInventoryConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // If sent in the request, the marker that is used as a starting point for this + // inventory configuration list response. + ContinuationToken *string `type:"string"` + + // The list of inventory configurations for a bucket. + InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` + + // Indicates whether the returned list of inventory configurations is truncated + // in this response. A value of true indicates that the list is truncated. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. 
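+	//
+	// Example (sketch, not part of the generated API): draining every page by
+	// feeding this token back in as ContinuationToken. svc (an *s3.S3 client)
+	// and the bucket name are placeholders.
+	//
+	//	var token *string
+	//	for {
+	//		out, err := svc.ListBucketInventoryConfigurations(&s3.ListBucketInventoryConfigurationsInput{
+	//			Bucket:            aws.String("examplebucket"),
+	//			ContinuationToken: token,
+	//		})
+	//		if err != nil || !aws.BoolValue(out.IsTruncated) {
+	//			break
+	//		}
+	//		token = out.NextContinuationToken
+	//	}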
+ NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketInventoryConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketInventoryConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetInventoryConfigurationList sets the InventoryConfigurationList field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput { + s.InventoryConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest +type ListBucketMetricsConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the metrics configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker that is used to continue a metrics configuration listing that + // has been truncated. Use the NextContinuationToken from a previously truncated + // list response to continue the listing. The continuation token is an opaque + // value that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketMetricsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketMetricsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketMetricsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput { + s.Bucket = &v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput +type ListBucketMetricsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The marker that is used as a starting point for this metrics configuration + // list response. 
+	// This value is present if it was sent in the request.
+	ContinuationToken *string `type:"string"`
+
+	// Indicates whether the returned list of metrics configurations is complete.
+	// A value of true indicates that the list is not complete and the NextContinuationToken
+	// will be provided for a subsequent request.
+	IsTruncated *bool `type:"boolean"`
+
+	// The list of metrics configurations for a bucket.
+	MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"`
+
+	// The marker used to continue a metrics configuration listing that has been
+	// truncated. Use the NextContinuationToken from a previously truncated list
+	// response to continue the listing. The continuation token is an opaque value
+	// that Amazon S3 understands.
+	NextContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketMetricsConfigurationsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketMetricsConfigurationsOutput) GoString() string {
+	return s.String()
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
+	s.ContinuationToken = &v
+	return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput {
+	s.IsTruncated = &v
+	return s
+}
+
+// SetMetricsConfigurationList sets the MetricsConfigurationList field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput {
+	s.MetricsConfigurationList = v
+	return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
+	s.NextContinuationToken = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput
+type ListBucketsInput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListBucketsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketsInput) GoString() string {
+	return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput
+type ListBucketsOutput struct {
+	_ struct{} `type:"structure"`
+
+	Buckets []*Bucket `locationNameList:"Bucket" type:"list"`
+
+	Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListBucketsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketsOutput) GoString() string {
+	return s.String()
+}
+
+// SetBuckets sets the Buckets field's value.
+func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput {
+	s.Buckets = v
+	return s
+}
+
+// SetOwner sets the Owner field's value.
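+
+// Example (sketch, not part of the generated API): listing all buckets owned
+// by the caller and printing their names. svc (an *s3.S3 client) is a
+// placeholder.
+//
+//	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
+//	if err == nil {
+//		for _, b := range out.Buckets {
+//			fmt.Println(aws.StringValue(b.Name))
+//		}
+//	}
+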
+func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { + s.Owner = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest +type ListMultipartUploadsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMultipartUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { + s.Bucket = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { + s.EncodingType = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. 
+func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { + s.MaxUploads = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { + s.UploadIdMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. + UploadIdMarker *string `type:"string"` + + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { + s.Bucket = &v + return s +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { + s.CommonPrefixes = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. 
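+
+// Example (sketch, not part of the generated API): paging through in-progress
+// multipart uploads by carrying the paired key/upload-id markers forward. svc
+// and the bucket name are placeholders.
+//
+//	in := &s3.ListMultipartUploadsInput{Bucket: aws.String("examplebucket")}
+//	for {
+//		out, err := svc.ListMultipartUploads(in)
+//		if err != nil || !aws.BoolValue(out.IsTruncated) {
+//			break
+//		}
+//		in.KeyMarker = out.NextKeyMarker
+//		in.UploadIdMarker = out.NextUploadIdMarker
+//	}
+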
+func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { + s.MaxUploads = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.NextUploadIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.UploadIdMarker = &v + return s +} + +// SetUploads sets the Uploads field's value. +func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput { + s.Uploads = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest +type ListObjectVersionsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Specifies the object version you want to start listing from. + VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListObjectVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { + s.Bucket = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput { + s.EncodingType = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { + s.VersionIdMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. If your results were truncated, you can + // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last Key returned in a truncated response. + KeyMarker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // Use this value for the key marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // Use this value for the next version id marker parameter in a subsequent request. + NextVersionIdMarker *string `type:"string"` + + Prefix *string `type:"string"` + + VersionIdMarker *string `type:"string"` + + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { + s.CommonPrefixes = v + return s +} + +// SetDeleteMarkers sets the DeleteMarkers field's value. 
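+
+// Example (sketch, not part of the generated API): walking a versioned bucket,
+// resuming each page from NextKeyMarker and NextVersionIdMarker. svc and the
+// bucket name are placeholders.
+//
+//	in := &s3.ListObjectVersionsInput{Bucket: aws.String("examplebucket")}
+//	for {
+//		out, err := svc.ListObjectVersions(in)
+//		if err != nil || !aws.BoolValue(out.IsTruncated) {
+//			break
+//		}
+//		in.KeyMarker = out.NextKeyMarker
+//		in.VersionIdMarker = out.NextVersionIdMarker
+//	}
+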
+func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { + s.DeleteMarkers = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { + s.Name = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { + s.NextVersionIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { + s.VersionIdMarker = &v + return s +} + +// SetVersions sets the Versions field's value. +func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { + s.Versions = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest +type ListObjectsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. 
+	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// list objects request. Bucket owners need not specify this parameter in their
+	// requests.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s ListObjectsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput {
+	s.Bucket = &v
+	return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput {
+	s.Delimiter = &v
+	return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput {
+	s.EncodingType = &v
+	return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput {
+	s.Marker = &v
+	return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput {
+	s.MaxKeys = &v
+	return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput {
+	s.Prefix = &v
+	return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput {
+	s.RequestPayer = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput
+type ListObjectsOutput struct {
+	_ struct{} `type:"structure"`
+
+	CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+	Contents []*Object `type:"list" flattened:"true"`
+
+	Delimiter *string `type:"string"`
+
+	// Encoding type used by Amazon S3 to encode object keys in the response.
+	EncodingType *string `type:"string" enum:"EncodingType"`
+
+	// A flag that indicates whether or not Amazon S3 returned all of the results
+	// that satisfied the search criteria.
+	IsTruncated *bool `type:"boolean"`
+
+	Marker *string `type:"string"`
+
+	MaxKeys *int64 `type:"integer"`
+
+	Name *string `type:"string"`
+
+	// When the response is truncated (the IsTruncated element value in the response
+	// is true), you can use the key name in this field as the marker in the subsequent
+	// request to get the next set of objects. Amazon S3 lists objects in alphabetical
+	// order. Note: This element is returned only if you have the delimiter request
+	// parameter specified. If the response does not include the NextMarker and it
+	// is truncated, you can use the value of the last Key in the response as the
+	// marker in the subsequent request to get the next set of object keys.
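+	//
+	// Example (sketch, not part of the generated API): continuing a truncated
+	// listing, falling back to the last returned key when NextMarker is absent.
+	// Here in and out are a prior request/response pair (placeholders).
+	//
+	//	marker := out.NextMarker
+	//	if marker == nil && len(out.Contents) > 0 {
+	//		marker = out.Contents[len(out.Contents)-1].Key
+	//	}
+	//	in.Marker = marker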
+ NextMarker *string `type:"string"`
+
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsOutput) GoString() string {
+ return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetContents sets the Contents field's value.
+func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput {
+ s.Contents = v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput {
+ s.Marker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput {
+ s.Name = &v
+ return s
+}
+
+// SetNextMarker sets the NextMarker field's value.
+func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput {
+ s.NextMarker = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput {
+ s.Prefix = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request
+type ListObjectsV2Input struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the bucket to list.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued
+ // on this bucket with a token. ContinuationToken is obfuscated and is not
+ // a real key.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // The owner field is not present in ListObjectsV2 responses by default. If
+ // you want the owner field returned with each key in the result, set the fetch
+ // owner field to true.
+ FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the list
+ // objects request in V2 style. Bucket owners need not specify this parameter
+ // in their requests.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Input) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Input) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsV2Input) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input {
+ s.Bucket = &v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input {
+ s.EncodingType = &v
+ return s
+}
+
+// SetFetchOwner sets the FetchOwner field's value.
+func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input {
+ s.FetchOwner = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input {
+ s.Prefix = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input {
+ s.StartAfter = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output
+type ListObjectsV2Output struct {
+ _ struct{} `type:"structure"`
+
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the
+ // next occurrence of the string specified by delimiter.
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ // Metadata about each object returned.
+ Contents []*Object `type:"list" flattened:"true"`
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued
+ // on this bucket with a token. ContinuationToken is obfuscated and is not
+ // a real key.
+ ContinuationToken *string `type:"string"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ // KeyCount is the number of keys returned with this request. KeyCount will
+ // always be less than or equal to the MaxKeys field. For example, if you ask
+ // for 50 keys, your result will include at most 50 keys.
+ KeyCount *int64 `type:"integer"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `type:"integer"`
+
+ // Name of the bucket to list.
+ Name *string `type:"string"`
+
+ // NextContinuationToken is sent when IsTruncated is true, which means there
+ // are more keys in the bucket that can be listed. The next list request to
+ // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken
+ // is obfuscated and is not a real key.
+ NextContinuationToken *string `type:"string"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `type:"string"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ StartAfter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Output) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Output) GoString() string {
+ return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetContents sets the Contents field's value.
+func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output {
+ s.Contents = v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKeyCount sets the KeyCount field's value.
+func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output {
+ s.KeyCount = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output {
+ s.Name = &v
+ return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output {
+ s.NextContinuationToken = &v
+ return s
+}
+
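+// A minimal usage sketch, not part of the generated SDK: paginating a bucket
+// listing with ListObjectsV2 and the continuation token. It assumes an
+// initialized client (e.g. svc := s3.New(sess)), the aws helper package, and
+// a hypothetical bucket name.
+//
+//    params := &s3.ListObjectsV2Input{
+//        Bucket:  aws.String("example-bucket"), // hypothetical name
+//        MaxKeys: aws.Int64(100),
+//    }
+//    for {
+//        page, err := svc.ListObjectsV2(params)
+//        if err != nil {
+//            return err // assumes an enclosing func returning error
+//        }
+//        for _, obj := range page.Contents {
+//            fmt.Println(*obj.Key) // each entry is an *Object
+//        }
+//        if page.IsTruncated == nil || !*page.IsTruncated {
+//            break
+//        }
+//        // feed NextContinuationToken back in as ContinuationToken
+//        params.ContinuationToken = page.NextContinuationToken
+//    }
+
+// SetPrefix sets the Prefix field's value.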
+func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output {
+ s.Prefix = &v
+ return s
+}
+
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output {
+ s.StartAfter = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest
+type ListPartsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Sets the maximum number of parts to return.
+ MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"`
+
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
+ PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. Documentation
+ // on downloading objects from requester pays buckets can be found at
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListPartsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListPartsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListPartsInput) SetBucket(v string) *ListPartsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ListPartsInput) SetKey(v string) *ListPartsInput {
+ s.Key = &v
+ return s
+}
+
+// SetMaxParts sets the MaxParts field's value.
+func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput {
+ s.MaxParts = &v
+ return s
+}
+
+// SetPartNumberMarker sets the PartNumberMarker field's value.
+func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput {
+ s.PartNumberMarker = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput {
+ s.RequestPayer = &v
+ return s
+}
+
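+// A usage sketch, not part of the generated SDK: listing the parts of an
+// in-progress multipart upload. Assumes a client svc; the bucket, key, and
+// upload ID values are hypothetical.
+//
+//    input := &s3.ListPartsInput{
+//        Bucket:   aws.String("example-bucket"),    // hypothetical
+//        Key:      aws.String("example-key"),       // hypothetical
+//        UploadId: aws.String("example-upload-id"), // e.g. from CreateMultipartUpload
+//    }
+//    out, err := svc.ListParts(input)
+//    if err == nil {
+//        for _, p := range out.Parts {
+//            fmt.Println(*p.PartNumber, *p.Size)
+//        }
+//    }
+
+// SetUploadId sets the UploadId field's value.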
+func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput {
+ s.UploadId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput
+type ListPartsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Date when the multipart upload will become eligible for the abort operation
+ // by a lifecycle rule.
+ AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Id of the lifecycle rule that makes a multipart upload eligible for the
+ // abort operation.
+ AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `type:"string"`
+
+ // Identifies who initiated the multipart upload.
+ Initiator *Initiator `type:"structure"`
+
+ // Indicates whether the returned list of parts is truncated.
+ IsTruncated *bool `type:"boolean"`
+
+ // Object key for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ // Maximum number of parts that were allowed in the response.
+ MaxParts *int64 `type:"integer"`
+
+ // When a list is truncated, this element specifies the last part in the list,
+ // as well as the value to use for the part-number-marker request parameter
+ // in a subsequent request.
+ NextPartNumberMarker *int64 `type:"integer"`
+
+ Owner *Owner `type:"structure"`
+
+ // Part number after which listing begins.
+ PartNumberMarker *int64 `type:"integer"`
+
+ Parts []*Part `locationName:"Part" type:"list" flattened:"true"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"StorageClass"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ UploadId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListPartsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsOutput) GoString() string {
+ return s.String()
+}
+
+// SetAbortDate sets the AbortDate field's value.
+func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput {
+ s.AbortDate = &v
+ return s
+}
+
+// SetAbortRuleId sets the AbortRuleId field's value.
+func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput {
+ s.AbortRuleId = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput {
+ s.Bucket = &v
+ return s
+}
+
+// SetInitiator sets the Initiator field's value.
+func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput {
+ s.Initiator = v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput {
+ s.Key = &v
+ return s
+}
+
+// SetMaxParts sets the MaxParts field's value.
+func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput {
+ s.MaxParts = &v
+ return s
+}
+
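+// A pagination sketch, not part of the generated SDK, continuing a truncated
+// parts listing with NextPartNumberMarker; input, out, and svc are from the
+// ListParts sketch above.
+//
+//    for out.IsTruncated != nil && *out.IsTruncated {
+//        input.PartNumberMarker = out.NextPartNumberMarker
+//        if out, err = svc.ListParts(input); err != nil {
+//            break
+//        }
+//    }
+
+// SetNextPartNumberMarker sets the NextPartNumberMarker field's value.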
+func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { + s.NextPartNumberMarker = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { + s.Owner = v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { + s.PartNumberMarker = &v + return s +} + +// SetParts sets the Parts field's value. +func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { + s.Parts = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { + s.RequestCharged = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + TargetBucket *string `type:"string"` + + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // This element lets you specify a prefix for the keys that the log files will + // be stored under. + TargetPrefix *string `type:"string"` +} + +// String returns the string representation +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetBucket sets the TargetBucket field's value. +func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { + s.TargetBucket = &v + return s +} + +// SetTargetGrants sets the TargetGrants field's value. +func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { + s.TargetGrants = v + return s +} + +// SetTargetPrefix sets the TargetPrefix field's value. +func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { + s.TargetPrefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator +type MetricsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix used when evaluating an AND predicate. + Prefix *string `type:"string"` + + // The list of tags used when evaluating an AND predicate. 
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s MetricsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { + s.Tags = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration +type MetricsConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a metrics configuration filter. The metrics configuration will + // only include objects that meet the filter's criteria. A filter must be a + // prefix, a tag, or a conjunction (MetricsAndOperator). + Filter *MetricsFilter `type:"structure"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MetricsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter +type MetricsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *MetricsAndOperator `type:"structure"` + + // The prefix used when evaluating a metrics filter. + Prefix *string `type:"string"` + + // The tag used when evaluating a metrics filter. 
+ Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s MetricsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { + s.Tag = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultipartUpload) GoString() string { + return s.String() +} + +// SetInitiated sets the Initiated field's value. +func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { + s.Initiated = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { + s.Initiator = v + return s +} + +// SetKey sets the Key field's value. +func (s *MultipartUpload) SetKey(v string) *MultipartUpload { + s.Key = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { + s.Owner = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { + s.UploadId = &v + return s +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration +type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { + s.NoncurrentDays = &v + return s +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA or GLACIER storage class. If your bucket is +// versioning-enabled (or versioning is suspended), you can set this action +// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA +// or GLACIER storage class at a specific period in the object's lifetime. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { + s.NoncurrentDays = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { + s.StorageClass = &v + return s +} + +// Container for specifying the notification configuration of the bucket. If +// this element is empty, notifications are turned off on the bucket. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NotificationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} + if s.LambdaFunctionConfigurations != nil { + for i, v := range s.LambdaFunctionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueueConfigurations != nil { + for i, v := range s.QueueConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TopicConfigurations != nil { + for i, v := range s.TopicConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. +func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { + s.LambdaFunctionConfigurations = v + return s +} + +// SetQueueConfigurations sets the QueueConfigurations field's value. +func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { + s.QueueConfigurations = v + return s +} + +// SetTopicConfigurations sets the TopicConfigurations field's value. +func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { + s.TopicConfigurations = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value. 
+func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated { + s.CloudFunctionConfiguration = v + return s +} + +// SetQueueConfiguration sets the QueueConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.QueueConfiguration = v + return s +} + +// SetTopicConfiguration sets the TopicConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.TopicConfiguration = v + return s +} + +// Container for object key name filtering rules. For information about key +// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter +type NotificationConfigurationFilter struct { + _ struct{} `type:"structure"` + + // Container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter { + s.Key = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object +type Object struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + Key *string `min:"1" type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectStorageClass"` +} + +// String returns the string representation +func (s Object) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Object) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *Object) SetETag(v string) *Object { + s.ETag = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Object) SetKey(v string) *Object { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Object) SetLastModified(v time.Time) *Object { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *Object) SetOwner(v *Owner) *Object { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *Object) SetSize(v int64) *Object { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Object) SetStorageClass(v string) *Object { + s.StorageClass = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier +type ObjectIdentifier struct { + _ struct{} `type:"structure"` + + // Key name of the object to delete. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. 
+ VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ObjectIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion +type ObjectVersion struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectVersion) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *ObjectVersion) SetETag(v string) *ObjectVersion { + s.ETag = &v + return s +} + +// SetIsLatest sets the IsLatest field's value. +func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ObjectVersion) SetKey(v string) *ObjectVersion { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner +type Owner struct { + _ struct{} `type:"structure"` + + DisplayName *string `type:"string"` + + ID *string `type:"string"` +} + +// String returns the string representation +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Owner) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Owner) SetDisplayName(v string) *Owner { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Owner) SetID(v string) *Owner { + s.ID = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part +type Part struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `type:"integer"` + + // Size of the uploaded part data. + Size *int64 `type:"integer"` +} + +// String returns the string representation +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Part) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *Part) SetETag(v string) *Part { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Part) SetLastModified(v time.Time) *Part { + s.LastModified = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *Part) SetPartNumber(v int64) *Part { + s.PartNumber = &v + return s +} + +// SetSize sets the Size field's value. +func (s *Part) SetSize(v int64) *Part { + s.Size = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest +type PutBucketAccelerateConfigurationInput struct { + _ struct{} `type:"structure" payload:"AccelerateConfiguration"` + + // Specifies the Accelerate Configuration you want to set for the bucket. + // + // AccelerateConfiguration is a required field + AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"` + + // Name of the bucket for which the accelerate configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
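+//
+// For example, an input built like the following sketch (hypothetical bucket
+// name; not part of the generated SDK) satisfies the required-field checks:
+//
+//    input := &s3.PutBucketAccelerateConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//        AccelerateConfiguration: &s3.AccelerateConfiguration{
+//            Status: aws.String("Enabled"), // a BucketAccelerateStatus value
+//        },
+//    }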
+func (s *PutBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} + if s.AccelerateConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccelerateConfiguration sets the AccelerateConfiguration field's value. +func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput { + s.AccelerateConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput +type PutBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest +type PutBucketAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
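+//
+// For example (a sketch with a hypothetical bucket name, not part of the
+// generated SDK), only Bucket is required here, so a canned-ACL request like
+//
+//    input := &s3.PutBucketAclInput{
+//        Bucket: aws.String("example-bucket"),
+//        ACL:    aws.String("private"), // a BucketCannedACL value
+//    }
+//
+// passes these checks.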
+func (s *PutBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { + s.Bucket = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { + s.GrantWriteACP = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest +type PutBucketAnalyticsConfigurationInput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + // + // AnalyticsConfiguration is a required field + AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true"` + + // The name of the bucket to which an analytics configuration is stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"} + if s.AnalyticsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.AnalyticsConfiguration != nil { + if err := s.AnalyticsConfiguration.Validate(); err != nil { + invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput { + s.AnalyticsConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput +type PutBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest +type PutBucketCorsInput struct { + _ struct{} `type:"structure" payload:"CORSConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // CORSConfiguration is a required field + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { + s.Bucket = &v + return s +} + +// SetCORSConfiguration sets the CORSConfiguration field's value. 
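+//
+// A usage sketch, not part of the generated SDK, assuming a client svc and a
+// hypothetical bucket name; CORSRules is the rule list on CORSConfiguration:
+//
+//    _, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
+//        Bucket: aws.String("example-bucket"),
+//        CORSConfiguration: &s3.CORSConfiguration{
+//            CORSRules: []*s3.CORSRule{{
+//                AllowedMethods: []*string{aws.String("GET")},
+//                AllowedOrigins: []*string{aws.String("*")},
+//            }},
+//        },
+//    })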
+func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { + s.CORSConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest +type PutBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // The name of the bucket where the inventory configuration will be stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the inventory configuration. + // + // InventoryConfiguration is a required field + InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.InventoryConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) + } + if s.InventoryConfiguration != nil { + if err := s.InventoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. 
+func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { + s.InventoryConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput +type PutBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
+func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { + s.LifecycleConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest +type PutBucketLifecycleInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { + s.Bucket = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
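+// Note the two lifecycle request shapes in this file: the deprecated
+// PutBucketLifecycleInput wraps a *LifecycleConfiguration, while
+// PutBucketLifecycleConfigurationInput above wraps a
+// *BucketLifecycleConfiguration. A minimal sketch of the latter (editor's
+// illustration, not upstream code; the rules slice is a placeholder):
+//
+//	input := new(PutBucketLifecycleConfigurationInput).
+//		SetBucket("examplebucket").
+//		SetLifecycleConfiguration(&BucketLifecycleConfiguration{Rules: rules})
+//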
+func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { + s.LifecycleConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest +type PutBucketLoggingInput struct { + _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // BucketLoggingStatus is a required field + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.BucketLoggingStatus == nil { + invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) + } + if s.BucketLoggingStatus != nil { + if err := s.BucketLoggingStatus.Validate(); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { + s.Bucket = &v + return s +} + +// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. +func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { + s.BucketLoggingStatus = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest +type PutBucketMetricsConfigurationInput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` + + // The name of the bucket for which the metrics configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the metrics configuration. 
+ // + // MetricsConfiguration is a required field + MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.MetricsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration")) + } + if s.MetricsConfiguration != nil { + if err := s.MetricsConfiguration.Validate(); err != nil { + invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput { + s.MetricsConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput +type PutBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest +type PutBucketNotificationConfigurationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for specifying the notification configuration of the bucket. If + // this element is empty, notifications are turned off on the bucket. + // + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
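+// As the field comment above notes, an empty NotificationConfiguration is
+// valid and turns notifications off for the bucket. A minimal sketch
+// (editor's illustration, not upstream code; the bucket name is a
+// placeholder):
+//
+//	input := new(PutBucketNotificationConfigurationInput).
+//		SetBucket("examplebucket").
+//		SetNotificationConfiguration(&NotificationConfiguration{})
+//	err := input.Validate() // nil: both required fields are set
+//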
+func (s *PutBucketNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + if s.NotificationConfiguration != nil { + if err := s.NotificationConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { + s.Bucket = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { + s.NotificationConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest +type PutBucketNotificationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { + s.Bucket = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. 
+func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { + s.NotificationConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest +type PutBucketPolicyInput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The bucket policy as a JSON document. + // + // Policy is a required field + Policy *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { + s.Bucket = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { + s.Policy = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest +type PutBucketReplicationInput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + // + // ReplicationConfiguration is a required field + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
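+// A minimal construction sketch for the replication input (editor's
+// illustration, not upstream code; the role ARN, bucket name, and rule
+// variable are placeholders):
+//
+//	input := new(PutBucketReplicationInput).
+//		SetBucket("examplebucket").
+//		SetReplicationConfiguration(new(ReplicationConfiguration).
+//			SetRole("arn:aws:iam::123456789012:role/replication-role").
+//			SetRules([]*ReplicationRule{rule}))
+//	// Validation recurses, so a bad rule is reported under the nested
+//	// context ReplicationConfiguration.Rules[0].
+//	err := input.Validate()
+//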
+func (s *PutBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.ReplicationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + } + if s.ReplicationConfiguration != nil { + if err := s.ReplicationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { + s.Bucket = &v + return s +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { + s.ReplicationConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput +type PutBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest +type PutBucketRequestPaymentInput struct { + _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // RequestPaymentConfiguration is a required field + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.RequestPaymentConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + } + if s.RequestPaymentConfiguration != nil { + if err := s.RequestPaymentConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. 
+func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { + s.RequestPaymentConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest +type PutBucketTaggingInput struct { + _ struct{} `type:"structure" payload:"Tagging"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { + s.Bucket = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { + s.Tagging = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest +type PutBucketVersioningInput struct { + _ struct{} `type:"structure" payload:"VersioningConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. 
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // VersioningConfiguration is a required field + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.VersioningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput { + s.Bucket = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { + s.MFA = &v + return s +} + +// SetVersioningConfiguration sets the VersioningConfiguration field's value. +func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput { + s.VersioningConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest +type PutBucketWebsiteInput struct { + _ struct{} `type:"structure" payload:"WebsiteConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // WebsiteConfiguration is a required field + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) + } + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
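+// For PutBucketVersioningInput above, the MFA header is the device serial
+// number and the current token code separated by a single space. A sketch
+// (editor's illustration, not upstream code; the serial number and token
+// are placeholders):
+//
+//	input := new(PutBucketVersioningInput).
+//		SetBucket("examplebucket").
+//		SetMFA("arn:aws:iam::123456789012:mfa/user 123456").
+//		SetVersioningConfiguration(&VersioningConfiguration{})
+//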
+func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { + s.Bucket = &v + return s +} + +// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. +func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { + s.WebsiteConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest +type PutObjectAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
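+// Key carries a min:"1" tag, so validation distinguishes a missing key from
+// an empty one. A sketch (editor's illustration, not upstream code):
+//
+//	input := new(PutObjectAclInput).SetBucket("examplebucket").SetKey("")
+//	err := input.Validate() // fails: Key must be at least 1 character long
+//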
+func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { + s.Bucket = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest +type PutObjectInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. 
+	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+	// Object data.
+	Body io.ReadSeeker `type:"blob"`
+
+	// Name of the bucket to which the PUT operation was initiated.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies caching behavior along the request/reply chain.
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Specifies presentational information for the object.
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// Size of the body in bytes. This parameter is useful when the size of the
+	// body cannot be determined automatically.
+	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to read the object data and its metadata.
+	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the object ACL.
+	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+	// Allows grantee to write the ACL for the applicable object.
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+	// Object key for which the PUT operation was initiated.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+	// requests for an object protected by AWS KMS will fail if not made via SSL
+	// or using SigV4. Documentation on configuring any of the officially supported
+	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+	// The type of storage to use for the object. Defaults to 'STANDARD'.
+	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+	// The tag-set for the object. The tag-set must be encoded as URL Query parameters
+	Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+	// If the bucket is configured as a website, redirects requests for this object
+	// to another object in the same bucket or to an external URL. Amazon S3 stores
+	// the value of this header in the object metadata.
+	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *PutObjectInput) SetACL(v string) *PutObjectInput {
+	s.ACL = &v
+	return s
+}
+
+// SetBody sets the Body field's value.
+func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput {
+	s.Body = v
+	return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectInput) SetBucket(v string) *PutObjectInput {
+	s.Bucket = &v
+	return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput {
+	s.CacheControl = &v
+	return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { + s.ContentLength = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { + s.ContentType = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectInput) SetKey(v string) *PutObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { + s.Metadata = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. 
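+// A typical PutObjectInput combines the body with a handful of headers. A
+// minimal sketch (editor's illustration, not upstream code; it assumes a
+// strings import, and the bucket, key, and content are placeholders):
+//
+//	input := new(PutObjectInput).
+//		SetBucket("examplebucket").
+//		SetKey("notes/hello.txt").
+//		SetBody(strings.NewReader("hello world")).
+//		SetContentType("text/plain").
+//		SetServerSideEncryption("AES256")
+//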
+func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput +type PutObjectOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. 
+func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput {
+	s.ServerSideEncryption = &v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
+	s.VersionId = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest
+type PutObjectTaggingInput struct {
+	_ struct{} `type:"structure" payload:"Tagging"`
+
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Tagging is a required field
+	Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
+
+	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectTaggingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectTaggingInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectTaggingInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+	if s.Tagging == nil {
+		invalidParams.Add(request.NewErrParamRequired("Tagging"))
+	}
+	if s.Tagging != nil {
+		if err := s.Tagging.Validate(); err != nil {
+			invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput {
+	s.Bucket = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput {
+	s.Key = &v
+	return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput {
+	s.Tagging = v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
+	s.VersionId = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput
+type PutObjectTaggingOutput struct {
+	_ struct{} `type:"structure"`
+
+	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectTaggingOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectTaggingOutput) GoString() string {
+	return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput {
+	s.VersionId = &v
+	return s
+}
+
+// Container for specifying a configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Queue Service (Amazon SQS) queue.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration +type QueueConfiguration struct { + _ struct{} `type:"structure"` + + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects + // events of specified type. + // + // QueueArn is a required field + QueueArn *string `locationName:"Queue" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueueConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *QueueConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.QueueArn == nil { + invalidParams.Add(request.NewErrParamRequired("QueueArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *QueueConfiguration) SetId(v string) *QueueConfiguration { + s.Id = &v + return s +} + +// SetQueueArn sets the QueueArn field's value. +func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { + s.QueueArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated +type QueueConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + Queue *string `type:"string"` +} + +// String returns the string representation +func (s QueueConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. +func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. 
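+// For the non-deprecated QueueConfiguration above, Events and QueueArn are
+// the required fields. A sketch (editor's illustration, not upstream code;
+// the ARN and event name are placeholders):
+//
+//	event := "s3:ObjectCreated:*"
+//	q := new(QueueConfiguration).
+//		SetQueueArn("arn:aws:sqs:us-east-1:123456789012:example-queue").
+//		SetEvents([]*string{&event})
+//	err := q.Validate() // nil: both required fields are set
+//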
+func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated {
+	s.Id = &v
+	return s
+}
+
+// SetQueue sets the Queue field's value.
+func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
+	s.Queue = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect
+type Redirect struct {
+	_ struct{} `type:"structure"`
+
+	// The host name to use in the redirect request.
+	HostName *string `type:"string"`
+
+	// The HTTP redirect code to use on the response. Not required if one of the
+	// siblings is present.
+	HttpRedirectCode *string `type:"string"`
+
+	// Protocol to use (http, https) when redirecting requests. The default is the
+	// protocol that is used in the original request.
+	Protocol *string `type:"string" enum:"Protocol"`
+
+	// The object key prefix to use in the redirect request. For example, to redirect
+	// requests for all pages with prefix docs/ (objects in the docs/ folder) to
+	// documents/, you can set a condition block with KeyPrefixEquals set to docs/
+	// and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+	// if one of the siblings is present. Can be present only if ReplaceKeyWith
+	// is not provided.
+	ReplaceKeyPrefixWith *string `type:"string"`
+
+	// The specific object key to use in the redirect request. For example, redirect
+	// requests to error.html. Not required if one of the siblings is present. Can
+	// be present only if ReplaceKeyPrefixWith is not provided.
+	ReplaceKeyWith *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Redirect) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Redirect) GoString() string {
+	return s.String()
+}
+
+// SetHostName sets the HostName field's value.
+func (s *Redirect) SetHostName(v string) *Redirect {
+	s.HostName = &v
+	return s
+}
+
+// SetHttpRedirectCode sets the HttpRedirectCode field's value.
+func (s *Redirect) SetHttpRedirectCode(v string) *Redirect {
+	s.HttpRedirectCode = &v
+	return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *Redirect) SetProtocol(v string) *Redirect {
+	s.Protocol = &v
+	return s
+}
+
+// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value.
+func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect {
+	s.ReplaceKeyPrefixWith = &v
+	return s
+}
+
+// SetReplaceKeyWith sets the ReplaceKeyWith field's value.
+func (s *Redirect) SetReplaceKeyWith(v string) *Redirect {
+	s.ReplaceKeyWith = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo
+type RedirectAllRequestsTo struct {
+	_ struct{} `type:"structure"`
+
+	// Name of the host where requests will be redirected.
+	//
+	// HostName is a required field
+	HostName *string `type:"string" required:"true"`
+
+	// Protocol to use (http, https) when redirecting requests. The default is the
+	// protocol that is used in the original request.
+	Protocol *string `type:"string" enum:"Protocol"`
+}
+
+// String returns the string representation
+func (s RedirectAllRequestsTo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RedirectAllRequestsTo) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
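+// A sketch for the website redirect types (editor's illustration, not
+// upstream code; the host name is a placeholder):
+//
+//	r := new(RedirectAllRequestsTo).
+//		SetHostName("example.com").
+//		SetProtocol("https")
+//	err := r.Validate() // nil: HostName, the only required field, is set
+//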
+func (s *RedirectAllRequestsTo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} + if s.HostName == nil { + invalidParams.Add(request.NewErrParamRequired("HostName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHostName sets the HostName field's value. +func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo { + s.HostName = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { + s.Protocol = &v + return s +} + +// Container for replication rules. You can add as many as 1,000 rules. Total +// replication configuration size can be up to 2 MB. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration +type ReplicationConfiguration struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating + // the objects. + // + // Role is a required field + Role *string `type:"string" required:"true"` + + // Container for information about a particular replication rule. Replication + // configuration must have at least one rule and can contain up to 1,000 rules. + // + // Rules is a required field + Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ReplicationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRole sets the Role field's value. +func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration { + s.Role = &v + return s +} + +// SetRules sets the Rules field's value. +func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule +type ReplicationRule struct { + _ struct{} `type:"structure"` + + // Destination is a required field + Destination *Destination `type:"structure" required:"true"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Object keyname prefix identifying one or more objects to which the rule applies. + // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes + // are not supported. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The rule is ignored if status is not Enabled. 
+ // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { + s.Destination = v + return s +} + +// SetID sets the ID field's value. +func (s *ReplicationRule) SetID(v string) *ReplicationRule { + s.ID = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { + s.Status = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + // + // Payer is a required field + Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPayer sets the Payer field's value. +func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration { + s.Payer = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest +type RestoreObjectInput struct { + _ struct{} `type:"structure" payload:"RestoreRequest"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"`
+
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s RestoreObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.RestoreRequest != nil {
+ if err := s.RestoreRequest.Validate(); err != nil {
+ invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetRestoreRequest sets the RestoreRequest field's value.
+func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput {
+ s.RestoreRequest = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput
+type RestoreObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s RestoreObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest
+type RestoreRequest struct {
+ _ struct{} `type:"structure"`
+
+ // Lifetime of the active copy in days
+ //
+ // Days is a required field
+ Days *int64 `type:"integer" required:"true"`
+
+ // Glacier-related parameters pertaining to this job.
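+ // For example (a sketch; SetTier is the generated setter on
+ // GlacierJobParameters defined elsewhere in this file, and the tier and
+ // day count are illustrative):
+ //
+ //	req := (&s3.RestoreRequest{}).SetDays(7).SetGlacierJobParameters(
+ //		(&s3.GlacierJobParameters{}).SetTier(s3.TierStandard))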
+ GlacierJobParameters *GlacierJobParameters `type:"structure"`
+}
+
+// String returns the string representation
+func (s RestoreRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreRequest) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreRequest) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
+ if s.Days == nil {
+ invalidParams.Add(request.NewErrParamRequired("Days"))
+ }
+ if s.GlacierJobParameters != nil {
+ if err := s.GlacierJobParameters.Validate(); err != nil {
+ invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDays sets the Days field's value.
+func (s *RestoreRequest) SetDays(v int64) *RestoreRequest {
+ s.Days = &v
+ return s
+}
+
+// SetGlacierJobParameters sets the GlacierJobParameters field's value.
+func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest {
+ s.GlacierJobParameters = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule
+type RoutingRule struct {
+ _ struct{} `type:"structure"`
+
+ // A container for describing a condition that must be met for the specified
+ // redirect to apply. For example, 1. If request is for pages in the /docs folder,
+ // redirect to the /documents folder. 2. If request results in HTTP error 4xx,
+ // redirect request to another host where you might process the error.
+ Condition *Condition `type:"structure"`
+
+ // Container for redirect information. You can redirect requests to another
+ // host, to another page, or with another protocol. In the event of an error,
+ // you can specify a different error code to return.
+ //
+ // Redirect is a required field
+ Redirect *Redirect `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s RoutingRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoutingRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RoutingRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RoutingRule"}
+ if s.Redirect == nil {
+ invalidParams.Add(request.NewErrParamRequired("Redirect"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCondition sets the Condition field's value.
+func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule {
+ s.Condition = v
+ return s
+}
+
+// SetRedirect sets the Redirect field's value.
+func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule {
+ s.Redirect = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule
+type Rule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the days since the initiation of an Incomplete Multipart Upload
+ // that Lifecycle will wait before permanently removing all parts of the upload.
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+ Expiration *LifecycleExpiration `type:"structure"`
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string `type:"string"`
+
+ // Specifies when noncurrent object versions expire.
Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA or GLACIER storage class. If your bucket is + // versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA + // or GLACIER storage class at a specific period in the object's lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transition *Transition `type:"structure"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { + s.Expiration = v + return s +} + +// SetID sets the ID field's value. +func (s *Rule) SetID(v string) *Rule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. +func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule { + s.NoncurrentVersionTransition = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Rule) SetPrefix(v string) *Rule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Rule) SetStatus(v string) *Rule { + s.Status = &v + return s +} + +// SetTransition sets the Transition field's value. +func (s *Rule) SetTransition(v *Transition) *Rule { + s.Transition = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis +type StorageClassAnalysis struct { + _ struct{} `type:"structure"` + + // A container used to describe how data related to the storage class analysis + // should be exported. 
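+ // A short sketch (the required export Destination is omitted for brevity;
+ // Validate will reject the configuration until it is set):
+ //
+ //	analysis := (&s3.StorageClassAnalysis{}).SetDataExport(
+ //		(&s3.StorageClassAnalysisDataExport{}).
+ //			SetOutputSchemaVersion(s3.StorageClassAnalysisSchemaVersionV1))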
+ DataExport *StorageClassAnalysisDataExport `type:"structure"` +} + +// String returns the string representation +func (s StorageClassAnalysis) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageClassAnalysis) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysis) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"} + if s.DataExport != nil { + if err := s.DataExport.Validate(); err != nil { + invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataExport sets the DataExport field's value. +func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis { + s.DataExport = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport +type StorageClassAnalysisDataExport struct { + _ struct{} `type:"structure"` + + // The place to store the data for an analysis. + // + // Destination is a required field + Destination *AnalyticsExportDestination `type:"structure" required:"true"` + + // The version of the output schema to use when exporting data. Must be V_1. + // + // OutputSchemaVersion is a required field + OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"` +} + +// String returns the string representation +func (s StorageClassAnalysisDataExport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageClassAnalysisDataExport) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysisDataExport) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.OutputSchemaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport { + s.Destination = v + return s +} + +// SetOutputSchemaVersion sets the OutputSchemaVersion field's value. +func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport { + s.OutputSchemaVersion = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag +type Tag struct { + _ struct{} `type:"structure"` + + // Name of the tag. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag. 
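+ // For example (a sketch; the key and value are placeholders):
+ //
+ //	tag := (&s3.Tag{}).SetKey("env").SetValue("prod")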
+ // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging +type Tagging struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tagging) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tagging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tagging"} + if s.TagSet == nil { + invalidParams.Add(request.NewErrParamRequired("TagSet")) + } + if s.TagSet != nil { + for i, v := range s.TagSet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagSet sets the TagSet field's value. +func (s *Tagging) SetTagSet(v []*Tag) *Tagging { + s.TagSet = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant +type TargetGrant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Logging permissions assigned to the Grantee for the bucket. + Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGrant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetGrant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. 
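+//
+// A short sketch granting READ permission on access logs (the grantee is
+// omitted; permission values come from the BucketLogsPermission enum defined
+// at the bottom of this file):
+//
+//	grant := (&s3.TargetGrant{}).SetPermission(s3.BucketLogsPermissionRead)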
+func (s *TargetGrant) SetPermission(v string) *TargetGrant {
+ s.Permission = &v
+ return s
+}
+
+// Container for specifying the configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Notification Service (Amazon SNS) topic.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration
+type TopicConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Events is a required field
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Container for object key name filtering rules. For information about key
+ // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ Filter *NotificationConfigurationFilter `type:"structure"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects
+ // events of the specified type.
+ //
+ // TopicArn is a required field
+ TopicArn *string `locationName:"Topic" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s TopicConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TopicConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"}
+ if s.Events == nil {
+ invalidParams.Add(request.NewErrParamRequired("Events"))
+ }
+ if s.TopicArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("TopicArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEvents sets the Events field's value.
+func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *TopicConfiguration) SetId(v string) *TopicConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetTopicArn sets the TopicArn field's value.
+func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration {
+ s.TopicArn = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated
+type TopicConfigurationDeprecated struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket event for which to send notifications.
+ Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // Amazon SNS topic to which Amazon S3 will publish a message to report the
+ // specified events for the bucket.
+ Topic *string `type:"string"`
+}
+
+// String returns the string representation
+func (s TopicConfigurationDeprecated) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicConfigurationDeprecated) GoString() string {
+ return s.String()
+}
+
+// SetEvent sets the Event field's value.
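+//
+// The Event field is deprecated; new code should generally populate the Events
+// list instead, e.g. (a sketch; the topic ARN is a placeholder):
+//
+//	cfg := (&s3.TopicConfigurationDeprecated{}).SetTopic("arn:aws:sns:us-east-1:123456789012:my-topic")
+//	cfg.SetEvents([]*string{aws.String(s3.EventS3ObjectCreated)})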
+func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated { + s.Id = &v + return s +} + +// SetTopic sets the Topic field's value. +func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated { + s.Topic = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Transition) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *Transition) SetDate(v time.Time) *Transition { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Transition) SetDays(v int64) *Transition { + s.Days = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Transition) SetStorageClass(v string) *Transition { + s.StorageClass = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest +type UploadPartCopyInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // The range of bytes to copy from the source object. The range value must use + // the form bytes=first-last, where the first and last are the zero-based byte + // offsets to copy. For example, bytes=0-9 indicates that you want to copy the + // first ten bytes of the source. 
You can copy a range only if the source object
+ // is greater than 5 MB.
+ CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
+
+ // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of part being copied. This is a positive integer between 1 and
+ // 10,000.
+ //
+ // PartNumber is a required field
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Upload ID identifying the multipart upload whose part is being copied.
+ // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartCopyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { + s.Bucket = &v + return s +} + +// SetCopySource sets the CopySource field's value. +func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceRange sets the CopySourceRange field's value. +func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput { + s.CopySourceRange = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKey = &v + return s +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. 
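+//
+// A sketch of a complete part-copy request (the bucket, key, and upload ID
+// are placeholders; the upload ID would come from a prior CreateMultipartUpload
+// call):
+//
+//	in := (&s3.UploadPartCopyInput{}).
+//		SetBucket("dst-bucket").
+//		SetKey("dst-key").
+//		SetCopySource("src-bucket/src-key").
+//		SetPartNumber(1).
+//		SetUploadId("example-upload-id")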
+func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput +type UploadPartCopyOutput struct { + _ struct{} `type:"structure" payload:"CopyPartResult"` + + CopyPartResult *CopyPartResult `type:"structure"` + + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyOutput) GoString() string { + return s.String() +} + +// SetCopyPartResult sets the CopyPartResult field's value. +func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { + s.CopyPartResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. 
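+//
+// Output setters such as this one are used mainly by the SDK's response
+// unmarshalers; callers typically read the field instead, e.g. (a sketch,
+// where out is the *UploadPartCopyOutput returned by UploadPartCopy):
+//
+//	if out.CopySourceVersionId != nil {
+//		log.Println("copied source version:", *out.CopySourceVersionId)
+//	}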
+func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput {
+ s.CopySourceVersionId = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest
+type UploadPartInput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ // Object data.
+ Body io.ReadSeeker `type:"blob"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Size of the body in bytes. This parameter is useful when the size of the
+ // body cannot be determined automatically.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of part being uploaded. This is a positive integer between 1
+ // and 10,000.
+ //
+ // PartNumber is a required field
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being uploaded. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBody sets the Body field's value. +func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { + s.Bucket = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { + s.ContentLength = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartInput) SetKey(v string) *UploadPartInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput { + s.ETag = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { + s.ServerSideEncryption = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration +type VersioningConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s VersioningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersioningConfiguration) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. 
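+//
+// A minimal sketch enabling versioning with MFA delete (both enum values are
+// defined at the bottom of this file):
+//
+//	cfg := (&s3.VersioningConfiguration{}).
+//		SetStatus(s3.BucketVersioningStatusEnabled).
+//		SetMFADelete(s3.MFADeleteEnabled)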
+func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { + s.Status = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration +type WebsiteConfiguration struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s WebsiteConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebsiteConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WebsiteConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"} + if s.ErrorDocument != nil { + if err := s.ErrorDocument.Validate(); err != nil { + invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams)) + } + } + if s.IndexDocument != nil { + if err := s.IndexDocument.Validate(); err != nil { + invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams)) + } + } + if s.RedirectAllRequestsTo != nil { + if err := s.RedirectAllRequestsTo.Validate(); err != nil { + invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams)) + } + } + if s.RoutingRules != nil { + for i, v := range s.RoutingRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. 
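+//
+// A sketch of a website configuration with one prefix-rewrite rule (the prefix
+// values are placeholders):
+//
+//	rule := (&s3.RoutingRule{}).SetRedirect(
+//		(&s3.Redirect{}).SetReplaceKeyPrefixWith("documents/"))
+//	cfg := (&s3.WebsiteConfiguration{}).SetRoutingRules([]*s3.RoutingRule{rule})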
+func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration {
+ s.RoutingRules = v
+ return s
+}
+
+const (
+ // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value
+ AnalyticsS3ExportFileFormatCsv = "CSV"
+)
+
+const (
+ // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value
+ BucketAccelerateStatusEnabled = "Enabled"
+
+ // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value
+ BucketAccelerateStatusSuspended = "Suspended"
+)
+
+const (
+ // BucketCannedACLPrivate is a BucketCannedACL enum value
+ BucketCannedACLPrivate = "private"
+
+ // BucketCannedACLPublicRead is a BucketCannedACL enum value
+ BucketCannedACLPublicRead = "public-read"
+
+ // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value
+ BucketCannedACLPublicReadWrite = "public-read-write"
+
+ // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value
+ BucketCannedACLAuthenticatedRead = "authenticated-read"
+)
+
+const (
+ // BucketLocationConstraintEu is a BucketLocationConstraint enum value
+ BucketLocationConstraintEu = "EU"
+
+ // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintEuWest1 = "eu-west-1"
+
+ // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintUsWest1 = "us-west-1"
+
+ // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value
+ BucketLocationConstraintUsWest2 = "us-west-2"
+
+ // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApSouth1 = "ap-south-1"
+
+ // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApSoutheast1 = "ap-southeast-1"
+
+ // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApSoutheast2 = "ap-southeast-2"
+
+ // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApNortheast1 = "ap-northeast-1"
+
+ // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintSaEast1 = "sa-east-1"
+
+ // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintCnNorth1 = "cn-north-1"
+
+ // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintEuCentral1 = "eu-central-1"
+)
+
+const (
+ // BucketLogsPermissionFullControl is a BucketLogsPermission enum value
+ BucketLogsPermissionFullControl = "FULL_CONTROL"
+
+ // BucketLogsPermissionRead is a BucketLogsPermission enum value
+ BucketLogsPermissionRead = "READ"
+
+ // BucketLogsPermissionWrite is a BucketLogsPermission enum value
+ BucketLogsPermissionWrite = "WRITE"
+)
+
+const (
+ // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value
+ BucketVersioningStatusEnabled = "Enabled"
+
+ // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value
+ BucketVersioningStatusSuspended = "Suspended"
+)
+
+// Requests Amazon S3 to encode the object keys in the response and specifies
+// the encoding method to use. An object key may contain any Unicode character;
+// however, the XML 1.0 parser cannot parse some characters, such as characters
+// with an ASCII value from 0 to 10. For characters that are not supported in
+// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+// keys in the response.
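+//
+// For example (a sketch; ListObjectsInput is defined earlier in this file and
+// the bucket name is a placeholder):
+//
+//	input := &s3.ListObjectsInput{
+//		Bucket:       aws.String("my-bucket"),
+//		EncodingType: aws.String(s3.EncodingTypeUrl),
+//	}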
+const ( + // EncodingTypeUrl is a EncodingType enum value + EncodingTypeUrl = "url" +) + +// Bucket event for which to send notifications. +const ( + // EventS3ReducedRedundancyLostObject is a Event enum value + EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" + + // EventS3ObjectCreated is a Event enum value + EventS3ObjectCreated = "s3:ObjectCreated:*" + + // EventS3ObjectCreatedPut is a Event enum value + EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" + + // EventS3ObjectCreatedPost is a Event enum value + EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" + + // EventS3ObjectCreatedCopy is a Event enum value + EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" + + // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value + EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + + // EventS3ObjectRemoved is a Event enum value + EventS3ObjectRemoved = "s3:ObjectRemoved:*" + + // EventS3ObjectRemovedDelete is a Event enum value + EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + + // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value + EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" +) + +const ( + // ExpirationStatusEnabled is a ExpirationStatus enum value + ExpirationStatusEnabled = "Enabled" + + // ExpirationStatusDisabled is a ExpirationStatus enum value + ExpirationStatusDisabled = "Disabled" +) + +const ( + // FilterRuleNamePrefix is a FilterRuleName enum value + FilterRuleNamePrefix = "prefix" + + // FilterRuleNameSuffix is a FilterRuleName enum value + FilterRuleNameSuffix = "suffix" +) + +const ( + // InventoryFormatCsv is a InventoryFormat enum value + InventoryFormatCsv = "CSV" +) + +const ( + // InventoryFrequencyDaily is a InventoryFrequency enum value + InventoryFrequencyDaily = "Daily" + + // InventoryFrequencyWeekly is a InventoryFrequency enum value + InventoryFrequencyWeekly = "Weekly" +) + +const ( + // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsAll = "All" + + // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsCurrent = "Current" +) + +const ( + // InventoryOptionalFieldSize is a InventoryOptionalField enum value + InventoryOptionalFieldSize = "Size" + + // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value + InventoryOptionalFieldLastModifiedDate = "LastModifiedDate" + + // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value + InventoryOptionalFieldStorageClass = "StorageClass" + + // InventoryOptionalFieldEtag is a InventoryOptionalField enum value + InventoryOptionalFieldEtag = "ETag" + + // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value + InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded" + + // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value + InventoryOptionalFieldReplicationStatus = "ReplicationStatus" +) + +const ( + // MFADeleteEnabled is a MFADelete enum value + MFADeleteEnabled = "Enabled" + + // MFADeleteDisabled is a MFADelete enum value + MFADeleteDisabled = "Disabled" +) + +const ( + // MFADeleteStatusEnabled is a MFADeleteStatus enum value + MFADeleteStatusEnabled = "Enabled" + + // MFADeleteStatusDisabled is a MFADeleteStatus enum value + MFADeleteStatusDisabled = "Disabled" +) + +const ( + // MetadataDirectiveCopy is a MetadataDirective enum value + 
MetadataDirectiveCopy = "COPY" + + // MetadataDirectiveReplace is a MetadataDirective enum value + MetadataDirectiveReplace = "REPLACE" +) + +const ( + // ObjectCannedACLPrivate is a ObjectCannedACL enum value + ObjectCannedACLPrivate = "private" + + // ObjectCannedACLPublicRead is a ObjectCannedACL enum value + ObjectCannedACLPublicRead = "public-read" + + // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value + ObjectCannedACLPublicReadWrite = "public-read-write" + + // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value + ObjectCannedACLAuthenticatedRead = "authenticated-read" + + // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value + ObjectCannedACLAwsExecRead = "aws-exec-read" + + // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerRead = "bucket-owner-read" + + // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +const ( + // ObjectStorageClassStandard is a ObjectStorageClass enum value + ObjectStorageClassStandard = "STANDARD" + + // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value + ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // ObjectStorageClassGlacier is a ObjectStorageClass enum value + ObjectStorageClassGlacier = "GLACIER" +) + +const ( + // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value + ObjectVersionStorageClassStandard = "STANDARD" +) + +const ( + // PayerRequester is a Payer enum value + PayerRequester = "Requester" + + // PayerBucketOwner is a Payer enum value + PayerBucketOwner = "BucketOwner" +) + +const ( + // PermissionFullControl is a Permission enum value + PermissionFullControl = "FULL_CONTROL" + + // PermissionWrite is a Permission enum value + PermissionWrite = "WRITE" + + // PermissionWriteAcp is a Permission enum value + PermissionWriteAcp = "WRITE_ACP" + + // PermissionRead is a Permission enum value + PermissionRead = "READ" + + // PermissionReadAcp is a Permission enum value + PermissionReadAcp = "READ_ACP" +) + +const ( + // ProtocolHttp is a Protocol enum value + ProtocolHttp = "http" + + // ProtocolHttps is a Protocol enum value + ProtocolHttps = "https" +) + +const ( + // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusEnabled = "Enabled" + + // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusDisabled = "Disabled" +) + +const ( + // ReplicationStatusComplete is a ReplicationStatus enum value + ReplicationStatusComplete = "COMPLETE" + + // ReplicationStatusPending is a ReplicationStatus enum value + ReplicationStatusPending = "PENDING" + + // ReplicationStatusFailed is a ReplicationStatus enum value + ReplicationStatusFailed = "FAILED" + + // ReplicationStatusReplica is a ReplicationStatus enum value + ReplicationStatusReplica = "REPLICA" +) + +// If present, indicates that the requester was successfully charged for the +// request. +const ( + // RequestChargedRequester is a RequestCharged enum value + RequestChargedRequester = "requester" +) + +// Confirms that the requester knows that she or he will be charged for the +// request. Bucket owners need not specify this parameter in their requests. 
+// Documentation on downloading objects from requester pays buckets can be found +// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html +const ( + // RequestPayerRequester is a RequestPayer enum value + RequestPayerRequester = "requester" +) + +const ( + // ServerSideEncryptionAes256 is a ServerSideEncryption enum value + ServerSideEncryptionAes256 = "AES256" + + // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value + ServerSideEncryptionAwsKms = "aws:kms" +) + +const ( + // StorageClassStandard is a StorageClass enum value + StorageClassStandard = "STANDARD" + + // StorageClassReducedRedundancy is a StorageClass enum value + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // StorageClassStandardIa is a StorageClass enum value + StorageClassStandardIa = "STANDARD_IA" +) + +const ( + // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value + StorageClassAnalysisSchemaVersionV1 = "V_1" +) + +const ( + // TaggingDirectiveCopy is a TaggingDirective enum value + TaggingDirectiveCopy = "COPY" + + // TaggingDirectiveReplace is a TaggingDirective enum value + TaggingDirectiveReplace = "REPLACE" +) + +const ( + // TierStandard is a Tier enum value + TierStandard = "Standard" + + // TierBulk is a Tier enum value + TierBulk = "Bulk" + + // TierExpedited is a Tier enum value + TierExpedited = "Expedited" +) + +const ( + // TransitionStorageClassGlacier is a TransitionStorageClass enum value + TransitionStorageClassGlacier = "GLACIER" + + // TransitionStorageClassStandardIa is a TransitionStorageClass enum value + TransitionStorageClassStandardIa = "STANDARD_IA" +) + +const ( + // TypeCanonicalUser is a Type enum value + TypeCanonicalUser = "CanonicalUser" + + // TypeAmazonCustomerByEmail is a Type enum value + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + + // TypeGroup is a Type enum value + TypeGroup = "Group" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go new file mode 100644 index 00000000..bc68a46a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -0,0 +1,106 @@ +package s3 + +import ( + "io/ioutil" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) + +// NormalizeBucketLocation is a utility function which will update the +// passed in value to always be a region ID. Generally this would be used +// with GetBucketLocation API operation. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +func NormalizeBucketLocation(loc string) string { + switch loc { + case "": + loc = "us-east-1" + case "EU": + loc = "eu-west-1" + } + + return loc +} + +// NormalizeBucketLocationHandler is a request handler which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. 
+// +// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }) +// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +// err := req.Send() +var NormalizeBucketLocationHandler = request.NamedHandler{ + Name: "awssdk.s3.NormalizeBucketLocation", + Fn: func(req *request.Request) { + if req.Error != nil { + return + } + + out := req.Data.(*GetBucketLocationOutput) + loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) + out.LocationConstraint = aws.String(loc) + }, +} + +// WithNormalizeBucketLocation is a request option which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// result, err := svc.GetBucketLocationWithContext(ctx, +// &s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }, +// s3.WithNormalizeBucketLocation, +// ) +func WithNormalizeBucketLocation(r *request.Request) { + r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +} + +func buildGetBucketLocation(r *request.Request) { + if r.DataFilled() { + out := r.Data.(*GetBucketLocationOutput) + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed reading response body", err) + return + } + + match := reBucketLocation.FindSubmatch(b) + if len(match) > 1 { + loc := string(match[1]) + out.LocationConstraint = aws.String(loc) + } + } +} + +func populateLocationConstraint(r *request.Request) { + if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { + in := r.Params.(*CreateBucketInput) + if in.CreateBucketConfiguration == nil { + r.Params = awsutil.CopyOf(r.Params) + in = r.Params.(*CreateBucketInput) + in.CreateBucketConfiguration = &CreateBucketConfiguration{ + LocationConstraint: r.Config.Region, + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go new file mode 100644 index 00000000..9fc5df94 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go @@ -0,0 +1,36 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// contentMD5 computes and sets the HTTP Content-MD5 header for requests that +// require it. +func contentMD5(r *request.Request) { + h := md5.New() + + // hash the body. seek back to the first position after reading to reset + // the body for transmission. copy errors may be assumed to be from the + // body. + _, err := io.Copy(h, r.Body) + if err != nil { + r.Error = awserr.New("ContentMD5", "failed to read body", err) + return + } + _, err = r.Body.Seek(0, 0) + if err != nil { + r.Error = awserr.New("ContentMD5", "failed to seek body", err) + return + } + + // encode the md5 checksum in base64 and set the request header. 
+	sum := h.Sum(nil)
+	sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+	base64.StdEncoding.Encode(sum64, sum)
+	r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
new file mode 100644
index 00000000..84633472
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -0,0 +1,46 @@
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+func init() {
+	initClient = defaultInitClientFn
+	initRequest = defaultInitRequestFn
+}
+
+func defaultInitClientFn(c *client.Client) {
+	// Support building custom endpoints based on config
+	c.Handlers.Build.PushFront(updateEndpointForS3Config)
+
+	// Require SSL when using SSE keys
+	c.Handlers.Validate.PushBack(validateSSERequiresSSL)
+	c.Handlers.Build.PushBack(computeSSEKeys)
+
+	// S3 uses custom error unmarshaling logic
+	c.Handlers.UnmarshalError.Clear()
+	c.Handlers.UnmarshalError.PushBack(unmarshalError)
+}
+
+func defaultInitRequestFn(r *request.Request) {
+	// Add request handlers for specific platforms.
+	// e.g. 100-continue support for PUT requests using Go 1.6
+	platformRequestHandlers(r)
+
+	switch r.Operation.Name {
+	case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
+		opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
+		opPutBucketReplication:
+		// These S3 operations require Content-MD5 to be set
+		r.Handlers.Build.PushBack(contentMD5)
+	case opGetBucketLocation:
+		// GetBucketLocation has custom parsing logic
+		r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+	case opCreateBucket:
+		// Auto-populate LocationConstraint with current region
+		r.Handlers.Validate.PushFront(populateLocationConstraint)
+	case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
+		r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
new file mode 100644
index 00000000..ec3ffe44
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -0,0 +1,162 @@
+package s3
+
+import (
+	"fmt"
+	"net/url"
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// An operationBlacklist is a list of operation names with which a
+// request handler should not be executed.
+type operationBlacklist []string
+
+// Continue will return true if the Request's operation name is not
+// in the blacklist. False otherwise.
+func (b operationBlacklist) Continue(r *request.Request) bool {
+	for i := 0; i < len(b); i++ {
+		if b[i] == r.Operation.Name {
+			return false
+		}
+	}
+	return true
+}
+
+var accelerateOpBlacklist = operationBlacklist{
+	opListBuckets, opCreateBucket, opDeleteBucket,
+}
+
+// Request handler to automatically add the bucket name to the endpoint domain
+// if possible. This style of bucket is valid for all bucket names which are
+// DNS compatible and do not contain "."
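+//
+// As an editorial illustration (not part of the vendored source), a path-style
+// URL such as
+//
+//    https://s3.amazonaws.com/my-bucket/my-key
+//
+// is rewritten by moveBucketToHost below into the virtual-hosted style
+//
+//    https://my-bucket.s3.amazonaws.com/my-key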
+func updateEndpointForS3Config(r *request.Request) {
+	forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
+	accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
+
+	if accelerate && accelerateOpBlacklist.Continue(r) {
+		if forceHostStyle {
+			if r.Config.Logger != nil {
+				r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
+			}
+		}
+		updateEndpointForAccelerate(r)
+	} else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
+		updateEndpointForHostStyle(r)
+	}
+}
+
+func updateEndpointForHostStyle(r *request.Request) {
+	bucket, ok := bucketNameFromReqParams(r.Params)
+	if !ok {
+		// Ignore operation requests if the bucket name was not provided;
+		// if this is an input validation error the validation handler
+		// will report it.
+		return
+	}
+
+	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+		// bucket name must be valid to put into the host
+		return
+	}
+
+	moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
+
+var (
+	accelElem = []byte("s3-accelerate.dualstack.")
+)
+
+func updateEndpointForAccelerate(r *request.Request) {
+	bucket, ok := bucketNameFromReqParams(r.Params)
+	if !ok {
+		// Ignore operation requests if the bucket name was not provided;
+		// if this is an input validation error the validation handler
+		// will report it.
+		return
+	}
+
+	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+		r.Error = awserr.New("InvalidParameterException",
+			fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
+			nil)
+		return
+	}
+
+	parts := strings.Split(r.HTTPRequest.URL.Host, ".")
+	if len(parts) < 3 {
+		r.Error = awserr.New("InvalidParameterException",
+			fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
+				r.HTTPRequest.URL.Host), nil)
+		return
+	}
+
+	if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+		parts[0] = "s3-accelerate"
+	}
+	for i := 1; i+1 < len(parts); i++ {
+		if parts[i] == aws.StringValue(r.Config.Region) {
+			parts = append(parts[:i], parts[i+1:]...)
+			break
+		}
+	}
+
+	r.HTTPRequest.URL.Host = strings.Join(parts, ".")
+
+	moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
+
+// Attempts to retrieve the bucket name from the request input parameters.
+// If no bucket is found, or the field is empty "", false will be returned.
+func bucketNameFromReqParams(params interface{}) (string, bool) {
+	b, _ := awsutil.ValuesAtPath(params, "Bucket")
+	if len(b) == 0 {
+		return "", false
+	}
+
+	if bucket, ok := b[0].(*string); ok {
+		if bucketStr := aws.StringValue(bucket); bucketStr != "" {
+			return bucketStr, true
+		}
+	}
+
+	return "", false
+}
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host. This is false if S3ForcePathStyle is
+// explicitly set or if the bucket is not DNS compatible.
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+	// Bucket might be DNS compatible but dots in the hostname will fail
+	// certificate validation, so do not use host-style.
+	if u.Scheme == "https" && strings.Contains(bucket, ".") {
+		return false
+	}
+
+	// if the bucket is DNS compatible
+	return dnsCompatibleBucketName(bucket)
+}
+
+var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
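+//
+// Editorial examples, derived from the checks below: "my-bucket-123" is DNS
+// compatible, while "My_Bucket" (uppercase and underscore), "192.168.0.1" (an
+// IP address), and "a..b" (consecutive dots) are not.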
+func dnsCompatibleBucketName(bucket string) bool {
+	return reDomain.MatchString(bucket) &&
+		!reIPAddress.MatchString(bucket) &&
+		!strings.Contains(bucket, "..")
+}
+
+// moveBucketToHost moves the bucket name from the URI path to URL host.
+func moveBucketToHost(u *url.URL, bucket string) {
+	u.Host = bucket + "." + u.Host
+	u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
+	if u.Path == "" {
+		u.Path = "/"
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
new file mode 100644
index 00000000..8e6f3307
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
@@ -0,0 +1,8 @@
+// +build !go1.6
+
+package s3
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func platformRequestHandlers(r *request.Request) {
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
new file mode 100644
index 00000000..14d05f7b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
@@ -0,0 +1,28 @@
+// +build go1.6
+
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+func platformRequestHandlers(r *request.Request) {
+	if r.Operation.HTTPMethod == "PUT" {
+		// 100-Continue should only be used on put requests.
+		r.Handlers.Sign.PushBack(add100Continue)
+	}
+}
+
+func add100Continue(r *request.Request) {
+	if aws.BoolValue(r.Config.S3Disable100Continue) {
+		return
+	}
+	if r.HTTPRequest.ContentLength < 1024*1024*2 {
+		// Ignore requests smaller than 2MB. This helps prevent delaying
+		// requests unnecessarily.
+		return
+	}
+
+	r.HTTPRequest.Header.Set("Expect", "100-Continue")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
new file mode 100644
index 00000000..614e477d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -0,0 +1,93 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+// S3 provides the API operation methods for making requests to
+// Amazon Simple Storage Service. See this package's package overview docs
+// for details on the service.
+//
+// S3 methods are safe to use concurrently. It is not safe to
+// modify any of the struct's properties though.
+type S3 struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "s3"        // Service endpoint prefix API calls are made to.
+	EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+)
+
+// New creates a new instance of the S3 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a S3 client from just a session.
+// svc := s3.New(mySession) +// +// // Create a S3 client with additional configuration +// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 { + svc := &S3{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2006-03-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a S3 operation and runs any +// custom request initialization. +func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go new file mode 100644 index 00000000..268ea2fb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -0,0 +1,44 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) + +func validateSSERequiresSSL(r *request.Request) { + if r.HTTPRequest.URL.Scheme != "https" { + p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") + if len(p) > 0 { + r.Error = errSSERequiresSSL + } + } +} + +func computeSSEKeys(r *request.Request) { + headers := []string{ + "x-amz-server-side-encryption-customer-key", + "x-amz-copy-source-server-side-encryption-customer-key", + } + + for _, h := range headers { + md5h := h + "-md5" + if key := r.HTTPRequest.Header.Get(h); key != "" { + // Base64-encode the value + b64v := base64.StdEncoding.EncodeToString([]byte(key)) + r.HTTPRequest.Header.Set(h, b64v) + + // Add MD5 if it wasn't computed + if r.HTTPRequest.Header.Get(md5h) == "" { + sum := md5.Sum([]byte(key)) + b64sum := base64.StdEncoding.EncodeToString(sum[:]) + r.HTTPRequest.Header.Set(md5h, b64sum) + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go new file mode 100644 index 00000000..5a78fd33 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -0,0 +1,35 @@ +package s3 + +import ( + "bytes" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +func copyMultipartStatusOKUnmarhsalError(r *request.Request) { + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err 
!= nil {
+		r.Error = awserr.New("SerializationError", "unable to read response body", err)
+		return
+	}
+	body := bytes.NewReader(b)
+	r.HTTPResponse.Body = ioutil.NopCloser(body)
+	defer body.Seek(0, 0)
+
+	if body.Len() == 0 {
+		// If there is no body, don't attempt to parse it.
+		return
+	}
+
+	unmarshalError(r)
+	if err, ok := r.Error.(awserr.Error); ok && err != nil {
+		if err.Code() == "SerializationError" {
+			r.Error = nil
+			return
+		}
+		r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
new file mode 100644
index 00000000..bcca8627
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -0,0 +1,103 @@
+package s3
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+type xmlErrorResponse struct {
+	XMLName xml.Name `xml:"Error"`
+	Code    string   `xml:"Code"`
+	Message string   `xml:"Message"`
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+
+	hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2")
+
+	// Bucket exists in a different region, and request needs
+	// to be made to the correct region.
+	if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
+		r.Error = requestFailure{
+			RequestFailure: awserr.NewRequestFailure(
+				awserr.New("BucketRegionError",
+					fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
+						aws.StringValue(r.Config.Region)),
+					nil),
+				r.HTTPResponse.StatusCode,
+				r.RequestID,
+			),
+			hostID: hostID,
+		}
+		return
+	}
+
+	var errCode, errMsg string
+
+	// Attempt to parse error from body if it is known
+	resp := &xmlErrorResponse{}
+	err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
+	if err != nil && err != io.EOF {
+		errCode = "SerializationError"
+		errMsg = "failed to decode S3 XML error response"
+	} else {
+		errCode = resp.Code
+		errMsg = resp.Message
+		err = nil
+	}
+
+	// Fallback to status code converted to message if still no error code
+	if len(errCode) == 0 {
+		statusText := http.StatusText(r.HTTPResponse.StatusCode)
+		errCode = strings.Replace(statusText, " ", "", -1)
+		errMsg = statusText
+	}
+
+	r.Error = requestFailure{
+		RequestFailure: awserr.NewRequestFailure(
+			awserr.New(errCode, errMsg, err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		),
+		hostID: hostID,
+	}
+}
+
+// A RequestFailure provides access to the S3 Request ID and Host ID values
+// returned from API operation errors. Getting the error as a string will
+// return the formatted error with the same information as awserr.RequestFailure,
+// while also adding the HostID value from the response.
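+//
+// An editorial usage sketch (err is assumed to come from an S3 API call) for
+// surfacing the host ID when debugging:
+//
+//    if reqErr, ok := err.(s3.RequestFailure); ok {
+//        fmt.Println("request id:", reqErr.RequestID(), "host id:", reqErr.HostID())
+//    }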
+type RequestFailure interface {
+	awserr.RequestFailure
+
+	// HostID is the S3 Host ID needed for debugging and contacting support
+	HostID() string
+}
+
+type requestFailure struct {
+	awserr.RequestFailure
+
+	hostID string
+}
+
+func (r requestFailure) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+		r.StatusCode(), r.RequestID(), r.hostID)
+	return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r requestFailure) String() string {
+	return r.Error()
+}
+func (r requestFailure) HostID() string {
+	return r.hostID
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
new file mode 100644
index 00000000..cccfa8c2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -0,0 +1,214 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// WaitUntilBucketExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window an error will
+// be returned.
+func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
+	return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+	w := request.Waiter{
+		Name:        "WaitUntilBucketExists",
+		MaxAttempts: 20,
+		Delay:       request.ConstantWaiterDelay(5 * time.Second),
+		Acceptors: []request.WaiterAcceptor{
+			{
+				State:    request.SuccessWaiterState,
+				Matcher:  request.StatusWaiterMatch,
+				Expected: 200,
+			},
+			{
+				State:    request.SuccessWaiterState,
+				Matcher:  request.StatusWaiterMatch,
+				Expected: 301,
+			},
+			{
+				State:    request.SuccessWaiterState,
+				Matcher:  request.StatusWaiterMatch,
+				Expected: 403,
+			},
+			{
+				State:    request.RetryWaiterState,
+				Matcher:  request.StatusWaiterMatch,
+				Expected: 404,
+			},
+		},
+		Logger: c.Config.Logger,
+		NewRequest: func(opts []request.Option) (*request.Request, error) {
+			var inCpy *HeadBucketInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.HeadBucketRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+	w.ApplyOptions(opts...)
+
+	return w.WaitWithContext(ctx)
+}
+
+// WaitUntilBucketNotExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window an error will
+// be returned.
+func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
+	return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
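+//
+// An editorial usage sketch (ctx, svc, and the bucket name are assumed):
+//
+//    err := svc.WaitUntilBucketNotExistsWithContext(ctx,
+//        &s3.HeadBucketInput{Bucket: aws.String("my-bucket")},
+//    )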
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+	w := request.Waiter{
+		Name:        "WaitUntilBucketNotExists",
+		MaxAttempts: 20,
+		Delay:       request.ConstantWaiterDelay(5 * time.Second),
+		Acceptors: []request.WaiterAcceptor{
+			{
+				State:    request.SuccessWaiterState,
+				Matcher:  request.StatusWaiterMatch,
+				Expected: 404,
+			},
+		},
+		Logger: c.Config.Logger,
+		NewRequest: func(opts []request.Option) (*request.Request, error) {
+			var inCpy *HeadBucketInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.HeadBucketRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+	w.ApplyOptions(opts...)
+
+	return w.WaitWithContext(ctx)
+}
+
+// WaitUntilObjectExists uses the Amazon S3 API operation
+// HeadObject to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window an error will
+// be returned.
+func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
+	return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
+	w := request.Waiter{
+		Name:        "WaitUntilObjectExists",
+		MaxAttempts: 20,
+		Delay:       request.ConstantWaiterDelay(5 * time.Second),
+		Acceptors: []request.WaiterAcceptor{
+			{
+				State:    request.SuccessWaiterState,
+				Matcher:  request.StatusWaiterMatch,
+				Expected: 200,
+			},
+			{
+				State:    request.RetryWaiterState,
+				Matcher:  request.StatusWaiterMatch,
+				Expected: 404,
+			},
+		},
+		Logger: c.Config.Logger,
+		NewRequest: func(opts []request.Option) (*request.Request, error) {
+			var inCpy *HeadObjectInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.HeadObjectRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+	w.ApplyOptions(opts...)
+
+	return w.WaitWithContext(ctx)
+}
+
+// WaitUntilObjectNotExists uses the Amazon S3 API operation
+// HeadObject to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window an error will
+// be returned.
+func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
+	return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests.
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 00000000..2de65288 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,2365 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AssumeRole for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + output = &AssumeRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRole API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) that you can use to access +// AWS resources that you might not normally have access to. Typically, you +// use AssumeRole for cross-account access or federation. For a comparison of +// AssumeRole with the other APIs that produce temporary credentials, see Requesting +// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. 
+// +// Important: You cannot call AssumeRole by using AWS root account credentials; +// access is denied. You must use credentials for an IAM user or an IAM role +// to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account and +// then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) +// in the IAM User Guide. +// +// For federation, you can, for example, grant single sign-on access to the +// AWS Management Console. If you already have an identity and authentication +// system in your corporate network, you don't have to recreate user identities +// in AWS in order to grant those user identities access to AWS. Instead, after +// a user has been authenticated, you call AssumeRole (and specify the role +// with the appropriate permissions) to get temporary security credentials for +// that user. With those temporary security credentials, you construct a sign-in +// URL that users can use to access the console. For more information, see Common +// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) +// in the IAM User Guide. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a +// maximum of 3600 seconds (1 hour). The default is 1 hour. +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any AWS service with the following exception: you cannot call +// the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// To assume a role, your AWS account must be trusted by the role. The trust +// relationship is defined in the role's trust policy when the role is created. +// That trust policy states which accounts are allowed to delegate access to +// this account's role. 
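+//
+// An editorial sketch of a basic call (svc is an assumed *STS client; the ARN
+// and session name are placeholder values):
+//
+//    out, err := svc.AssumeRole(&sts.AssumeRoleInput{
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"),
+//        RoleSessionName: aws.String("demo-session"),
+//        DurationSeconds: aws.Int64(900),
+//    })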
+//
+// The user who wants to access the role must also have permissions delegated
+// from the role's administrator. If the user is in a different account than
+// the role, then the user's administrator must attach a policy that allows
+// the user to call AssumeRole on the ARN of the role in the other account.
+// If the user is in the same account as the role, then you can either attach
+// a policy to the user (identical to the previous different account user),
+// or you can add the user as a principal directly in the role's trust policy.
+//
+// Using MFA with AssumeRole
+//
+// You can optionally include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios in which
+// you want to make sure that the user who is assuming the role has been authenticated
+// using an AWS MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication; if the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRole for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+	req, out := c.AssumeRoleRequest(input)
+	return out, req.Send()
+}
+
+// AssumeRoleWithContext is the same as AssumeRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation.
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithSAML = "AssumeRoleWithSAML" + +// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithSAML operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AssumeRoleWithSAML for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRoleWithSAML method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + output = &AssumeRoleWithSAMLOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRoleWithSAML API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response. This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based AWS access +// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML +// with the other APIs that produce temporary credentials, see Requesting Temporary +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to AWS services. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRole, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. The duration +// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). +// The default is 1 hour. 
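+//
+// An editorial sketch (svc is an assumed *STS client; the ARNs and the
+// base64-encoded assertion are placeholder values):
+//
+//    out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
+//        PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/demo"),
+//        RoleArn:       aws.String("arn:aws:iam::123456789012:role/demo"),
+//        SAMLAssertion: aws.String(samlAssertion), // assumed variable
+//    })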
+// +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any AWS service with the following exception: you cannot +// call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by the intersection of both the access policy +// of the role that is being assumed, and the policy that you pass. This means +// that both policies must grant the permission for the action to be allowed. +// This gives you a way to further restrict the permissions for the resulting +// temporary security credentials. You cannot use the passed policy to grant +// permissions that are in excess of those allowed by the access policy of the +// role that is being assumed. For more information, see Permissions for AssumeRole, +// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider, and create +// an IAM role that specifies this SAML provider in its trust policy. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. +// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// logs. The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the Persistent +// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// For more information, see the following resources: +// +// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithSAML for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AssumeRoleWithWebIdentity for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If
+// you just want the service response, call the AssumeRoleWithWebIdentity method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
+// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRoleWithWebIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleWithWebIdentityInput{}
+	}
+
+	output = &AssumeRoleWithWebIdentityOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider, such as Amazon
+// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
+// identity provider.
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can
+// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
+// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
+// identify a user and supply the user with a consistent identity throughout
+// the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in the AWS SDK for Android Developer Guide and Amazon Cognito Overview
+// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the AWS SDK for iOS Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
+// credentials. Therefore, you can distribute an application (for example, on
+// mobile devices) that requests temporary security credentials without including
+// long-term AWS credentials in the application, and without deploying server-based
+// proxy services that use long-term AWS credentials. Instead, the identity
+// of the caller is validated by using a token from the web identity provider.
+// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce
+// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to AWS service APIs.
+//
+// The credentials are valid for the duration that you specified when calling
+// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to
+// a maximum of 3600 seconds (1 hour). The default is 1 hour.
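+//
+// An editorial sketch (svc is an assumed *STS client; the role ARN and token
+// are placeholder values):
+//
+//    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/demo"),
+//        RoleSessionName:  aws.String("web-session"),
+//        WebIdentityToken: aws.String(webIdentityToken), // assumed variable
+//    })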
+// +// The temporary security credentials created by AssumeRoleWithWebIdentity can +// be used to make API calls to any AWS service with the following exception: +// you cannot call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided Web Identity Token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// This interactive website lets you walk through the process of authenticating +// via Login with Amazon, Facebook, or Google, getting temporary security +// credentials, and then using those credentials to make a request to AWS. +// +// +// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android +// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample +// apps that show how to invoke the identity providers, and then how to use +// the information from these providers to get and use temporary security +// credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). 
+// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the non-AWS identity provider +// (IDP) that was asked to verify the incoming identity token could not be reached. +// This is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the non-AWS identity provider might be down or not responding. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + return out, req.Send() +} + +// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithWebIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DecodeAuthorizationMessage for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DecodeAuthorizationMessage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + output = &DecodeAuthorizationMessageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DecodeAuthorizationMessage API operation for AWS Security Token Service. +// +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an AWS request. +// +// For example, if a user is not authorized to perform an action that he or +// she has requested, the request returns a Client.UnauthorizedOperation response +// (an HTTP 403 response). Some AWS actions additionally return an encoded message +// that can provide details about this authorization failure. +// +// Only certain AWS actions return an encoded authorization message. The documentation +// for an individual action indicates whether that action returns an encoded +// message in addition to returning an HTTP code. +// +// The message is encoded because the details of the authorization status can +// constitute privileged information that the user who requested the action +// should not see. To decode an authorization status message, a user must be +// granted permissions via an IAM policy to request the DecodeAuthorizationMessage +// (sts:DecodeAuthorizationMessage) action. +// +// The decoded message includes the following type of information: +// +// * Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. 
For more information, see Determining Whether +// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// * The principal who made the request. +// +// * The requested action. +// +// * The requested resource. +// +// * The values of condition keys in the context of the user's request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation DecodeAuthorizationMessage for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + return out, req.Send() +} + +// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of +// the ability to pass a context and additional request options. +// +// See DecodeAuthorizationMessage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetCallerIdentity = "GetCallerIdentity" + +// GetCallerIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCallerIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetCallerIdentity for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCallerIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCallerIdentityRequest method. 
+// req, resp := client.GetCallerIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { + op := &request.Operation{ + Name: opGetCallerIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCallerIdentityInput{} + } + + output = &GetCallerIdentityOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCallerIdentity API operation for AWS Security Token Service. +// +// Returns details about the IAM identity whose credentials are used to call +// the API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetCallerIdentity for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + return out, req.Send() +} + +// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See GetCallerIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetFederationToken = "GetFederationToken" + +// GetFederationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetFederationToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetFederationToken for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetFederationToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetFederationTokenRequest method. 
+// req, resp := client.GetFederationTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+ op := &request.Operation{
+ Name: opGetFederationToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetFederationTokenInput{}
+ }
+
+ output = &GetFederationTokenOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. Because
+// you must call the GetFederationToken action using the long-term security
+// credentials of an IAM user, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other APIs that produce temporary
+// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// If you are creating a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider, we recommend that you
+// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// The GetFederationToken action must be called by using the long-term AWS security
+// credentials of an IAM user. You can also call GetFederationToken using the
+// security credentials of an AWS root account, but we do not recommend it.
+// Instead, we recommend that you create an IAM user for the purpose of the
+// proxy application and then attach a policy to the IAM user that limits federated
+// users to only the actions and resources that they need access to. For more
+// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials that are obtained by using the long-term
+// credentials of an IAM user are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
+// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
+// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
+//
+// The temporary security credentials created by GetFederationToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot use these credentials to call any IAM APIs.
+//
+// * You cannot call any STS APIs except GetCallerIdentity.
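+//
+// As an illustrative sketch only (not part of the generated reference), a
+// proxy application might call GetFederationToken as follows; the federated
+// user name and the scope-down policy document are hypothetical, and the
+// session is assumed to carry the IAM user's long-term credentials.
+//
+// svc := sts.New(session.Must(session.NewSession()))
+// out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
+// Name: aws.String("app-user"), // hypothetical federated user name
+// Policy: aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`), // hypothetical scope-down policy
+// })
+// if err == nil {
+// fmt.Println(out.Credentials)
+// }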
+// +// Permissions +// +// The permissions for the temporary security credentials returned by GetFederationToken +// are determined by a combination of the following: +// +// * The policy or policies that are attached to the IAM user whose credentials +// are used to call GetFederationToken. +// +// * The policy that is passed as a parameter in the call. +// +// The passed policy is attached to the temporary security credentials that +// result from the GetFederationToken API call--that is, to the federated user. +// When the federated user makes an AWS request, AWS evaluates the policy attached +// to the federated user in combination with the policy or policies attached +// to the IAM user whose credentials were used to call GetFederationToken. AWS +// allows the federated user's request only when both the federated user and +// the IAM user are explicitly allowed to perform the requested action. The +// passed policy cannot grant more permissions than those that are defined in +// the IAM user policy. +// +// A typical use case is that the permissions of the IAM user whose credentials +// are used to call GetFederationToken are designed to allow access to all the +// actions and resources that any federated user will need. Then, for individual +// users, you pass a policy to the operation that scopes down the permissions +// to a level that's appropriate to that individual user, using a policy that +// allows only a subset of permissions that are granted to the IAM user. +// +// If you do not pass a policy, the resulting temporary security credentials +// have no effective permissions. The only exception is when the temporary security +// credentials are used to access a resource that has a resource-based policy +// that specifically allows the federated user to access the resource. +// +// For more information about how permissions work, see Permissions for GetFederationToken +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). +// For information about using GetFederationToken to create temporary security +// credentials, see GetFederationToken—Federation Through a Custom Identity +// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetFederationToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. 
For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + return out, req.Send() +} + +// GetFederationTokenWithContext is the same as GetFederationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetFederationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSessionToken = "GetSessionToken" + +// GetSessionTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetSessionToken for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSessionToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSessionTokenRequest method. +// req, resp := client.GetSessionTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { + op := &request.Operation{ + Name: opGetSessionToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSessionTokenInput{} + } + + output = &GetSessionTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSessionToken API operation for AWS Security Token Service. +// +// Returns a set of temporary credentials for an AWS account or IAM user. The +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use GetSessionToken if you want to use MFA to protect +// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled +// IAM users would need to call GetSessionToken and submit an MFA code that +// is associated with their MFA device. Using the temporary security credentials +// that are returned from the call, IAM users can then make programmatic calls +// to APIs that require MFA authentication. 
If you do not supply a correct MFA
+// code, then the API returns an access denied error. For a comparison of GetSessionToken
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The GetSessionToken action must be called by using the long-term AWS security
+// credentials of the AWS account or an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify, from 900 seconds
+// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
+// of 43200 seconds (12 hours); credentials that are created by using account
+// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
+// seconds (1 hour), with a default of 1 hour.
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot call any IAM APIs unless MFA authentication information is
+// included in the request.
+//
+// * You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//
+// We recommend that you do not call GetSessionToken with root account credentials.
+// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+// The permissions associated with the temporary security credentials returned
+// by GetSessionToken are based on the permissions associated with the account
+// or IAM user whose credentials are used to call the action. If GetSessionToken
+// is called using root account credentials, the temporary credentials have
+// root account permissions. Similarly, if GetSessionToken is called using the
+// credentials of an IAM user, the temporary credentials have the same permissions
+// as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetSessionToken for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
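+//
+// A minimal illustrative sketch (not part of the generated reference) of an
+// MFA-protected GetSessionToken call; the serial number and token code are
+// hypothetical placeholders, and the session is assumed to use the IAM user's
+// long-term credentials.
+//
+// svc := sts.New(session.Must(session.NewSession()))
+// out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
+// SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/user"), // hypothetical virtual MFA device ARN
+// TokenCode: aws.String("123456"), // hypothetical code from the MFA device
+// })
+// if err == nil {
+// fmt.Println(out.Credentials)
+// }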
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+ req, out := c.GetSessionTokenRequest(input)
+ return out, req.Send()
+}
+
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+ req, out := c.GetSessionTokenRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest
+type AssumeRoleInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+ // to 3600 seconds.
+ //
+ // This is separate from the duration of a console session that you might request
+ // using the returned credentials. The request to the federation endpoint for
+ // a console sign-in token takes a SessionDuration parameter that specifies
+ // the maximum length of the console session, separately from the DurationSeconds
+ // parameter on this API. For more information, see Creating a URL that Enables
+ // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // A unique identifier that is used by third parties when assuming roles in
+ // their customers' accounts. For each role that the third party can assume,
+ // they should instruct their customers to ensure the role's trust policy checks
+ // for the external ID that the third party generated. Each time the third party
+ // assumes the role, they should pass the customer's external ID. The external
+ // ID is useful in order to help third parties bind a role to the customer who
+ // created it. For more information about the external ID, see How to Use an
+ // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+ // in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@:\/-
+ ExternalId *string `min:"2" type:"string"`
+
+ // An IAM policy in JSON format.
+ //
+ // This parameter is optional. If you pass a policy, the temporary security
+ // credentials that are returned by the operation have the permissions that
+ // are allowed by both (the intersection of) the access policy of the role that
+ // is being assumed, and the policy that you pass. This gives you a way to further
+ // restrict the permissions for the resulting temporary security credentials.
+ // You cannot use the passed policy to grant permissions that are in excess + // of those allowed by the access policy of the role that is being assumed. + // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, + // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests using the temporary security credentials will expose the role session + // name to the external account in their CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. 
+ TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. 
+ AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. An expiration can also be specified in the SAML authentication + // response's SessionNotOnOrAfter value. The actual expiration time is whichever + // value is shorter. + // + // This is separate from the duration of a console session that you might request + // using the returned credentials. The request to the federation endpoint for + // a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Enabling SAML 2.0 Federated + // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. 
For more information, see
+ // Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+ // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+ // in the IAM User Guide.
+ //
+ // The format for this parameter, as described by its regex pattern, is a string
+ // of characters up to 2048 characters in length. The characters can be any
+ // ASCII character from the space character to the end of the valid character
+ // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+ // the IdP.
+ //
+ // PrincipalArn is a required field
+ PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // The base-64 encoded SAML authentication response provided by the IdP.
+ //
+ // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+ // in the Using IAM guide.
+ //
+ // SAMLAssertion is a required field
+ SAMLAssertion *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithSAMLInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.PrincipalArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
+ }
+ if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.SAMLAssertion == nil {
+ invalidParams.Add(request.NewErrParamRequired("SAMLAssertion"))
+ }
+ if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. + Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. 
+ SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. + // + // This is separate from the duration of a console session that you might request + // using the returned credentials. The request to the federation endpoint for + // a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Creating a URL that Enables + // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. 
For more information, + // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. 
+ Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID Tokens this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. 
For more information about ARNs and how to use them in + // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// AWS credentials for API authentication. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. 
+ // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. 
+func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity making the call. The values returned are those listed in the + // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds + // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained + // using AWS account (root) credentials are restricted to a maximum of 3600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using AWS account (root) credentials defaults to one + // hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that is passed with the GetFederationToken call + // and evaluated along with the policy or policies that are attached to the + // IAM user whose credentials are used to call GetFederationToken. The passed + // policy is used to scope down the permissions that are available to the IAM + // user, by allowing only a subset of the permissions that are granted to the + // IAM user. The passed policy cannot grant more permissions than those granted + // to the IAM user. The final permissions for the federated user are the most + // restrictive set based on the intersection of the passed policy and the IAM + // user policy. + // + // If you do not pass a policy, the resulting temporary security credentials + // have no effective permissions. The only exception is when the temporary security + // credentials are used to access a resource that has a resource-based policy + // that specifically allows the federated user to access the resource. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + // + // For more information about how permissions work, see Permissions for GetFederationToken + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. 
+func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value indicating the size of the policy in packed form. The + // service rejects policies for which the packed size is greater than 100 percent + // of the allowed value. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 + // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). + // If the duration is longer than one hour, the session for AWS account owners + // defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. 
+ // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, and the user does not provide a code when requesting a set of + // temporary security credentials, the user will receive an "access denied" + // response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { + s.DurationSeconds = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful GetSessionToken request, including +// temporary AWS credentials that can be used to make AWS requests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` +} + +// String returns the string representation +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. 
+func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput {
+	s.Credentials = v
+	return s
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
new file mode 100644
index 00000000..4010cc7f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
@@ -0,0 +1,12 @@
+package sts
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func init() {
+	initRequest = func(r *request.Request) {
+		switch r.Operation.Name {
+		case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity:
+			r.Handlers.Sign.Clear() // these operations are unsigned
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 00000000..1ee5839e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,93 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type STS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "sts"       // Service endpoint prefix API calls are made to.
+	EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a STS client from just a session.
+//     svc := sts.New(mySession)
+//
+//     // Create a STS client with additional configuration
+//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
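[Reviewer note, not part of the patch: a minimal sketch of how the vendored client above fits together. `New` constructs the client, the `initRequest` hook in customizations.go strips signing from the two SAML/web-identity operations, and the generated setters from api.go build the request. The region, role ARN, and token values are placeholders.]

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess, aws.NewConfig().WithRegion("us-west-2"))

	// AssumeRoleWithWebIdentity is one of the operations that
	// customizations.go leaves unsigned; the ARN and token below
	// are placeholder values.
	in := (&sts.AssumeRoleWithWebIdentityInput{}).
		SetRoleArn("arn:aws:iam::123456789012:role/example-role").
		SetRoleSessionName("example-session").
		SetWebIdentityToken("token-from-your-identity-provider")
	if err := in.Validate(); err != nil {
		log.Fatal(err) // e.g. a RoleArn shorter than 20 characters
	}

	out, err := svc.AssumeRoleWithWebIdentity(in)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*out.Credentials.AccessKeyId)
}
```

+// newClient creates, initializes and returns a new service client instance.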
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS { + svc := &STS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2011-06-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a STS operation and runs any +// custom request initialization. +func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/bgentry/go-netrc/LICENSE b/vendor/github.com/bgentry/go-netrc/LICENSE new file mode 100644 index 00000000..aade9a58 --- /dev/null +++ b/vendor/github.com/bgentry/go-netrc/LICENSE @@ -0,0 +1,20 @@ +Original version Copyright © 2010 Fazlul Shahriar . Newer +portions Copyright © 2014 Blake Gentry . + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/bgentry/go-netrc/netrc/netrc.go b/vendor/github.com/bgentry/go-netrc/netrc/netrc.go new file mode 100644 index 00000000..ea49987c --- /dev/null +++ b/vendor/github.com/bgentry/go-netrc/netrc/netrc.go @@ -0,0 +1,510 @@ +package netrc + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type tkType int + +const ( + tkMachine tkType = iota + tkDefault + tkLogin + tkPassword + tkAccount + tkMacdef + tkComment + tkWhitespace +) + +var keywords = map[string]tkType{ + "machine": tkMachine, + "default": tkDefault, + "login": tkLogin, + "password": tkPassword, + "account": tkAccount, + "macdef": tkMacdef, + "#": tkComment, +} + +type Netrc struct { + tokens []*token + machines []*Machine + macros Macros + updateLock sync.Mutex +} + +// FindMachine returns the Machine in n named by name. If a machine named by +// name exists, it is returned. 
If no Machine with name name is found and there +// is a ``default'' machine, the ``default'' machine is returned. Otherwise, nil +// is returned. +func (n *Netrc) FindMachine(name string) (m *Machine) { + // TODO(bgentry): not safe for concurrency + var def *Machine + for _, m = range n.machines { + if m.Name == name { + return m + } + if m.IsDefault() { + def = m + } + } + if def == nil { + return nil + } + return def +} + +// MarshalText implements the encoding.TextMarshaler interface to encode a +// Netrc into text format. +func (n *Netrc) MarshalText() (text []byte, err error) { + // TODO(bgentry): not safe for concurrency + for i := range n.tokens { + switch n.tokens[i].kind { + case tkComment, tkDefault, tkWhitespace: // always append these types + text = append(text, n.tokens[i].rawkind...) + default: + if n.tokens[i].value != "" { // skip empty-value tokens + text = append(text, n.tokens[i].rawkind...) + } + } + if n.tokens[i].kind == tkMacdef { + text = append(text, ' ') + text = append(text, n.tokens[i].macroName...) + } + text = append(text, n.tokens[i].rawvalue...) + } + return +} + +func (n *Netrc) NewMachine(name, login, password, account string) *Machine { + n.updateLock.Lock() + defer n.updateLock.Unlock() + + prefix := "\n" + if len(n.tokens) == 0 { + prefix = "" + } + m := &Machine{ + Name: name, + Login: login, + Password: password, + Account: account, + + nametoken: &token{ + kind: tkMachine, + rawkind: []byte(prefix + "machine"), + value: name, + rawvalue: []byte(" " + name), + }, + logintoken: &token{ + kind: tkLogin, + rawkind: []byte("\n\tlogin"), + value: login, + rawvalue: []byte(" " + login), + }, + passtoken: &token{ + kind: tkPassword, + rawkind: []byte("\n\tpassword"), + value: password, + rawvalue: []byte(" " + password), + }, + accounttoken: &token{ + kind: tkAccount, + rawkind: []byte("\n\taccount"), + value: account, + rawvalue: []byte(" " + account), + }, + } + n.insertMachineTokensBeforeDefault(m) + for i := range n.machines { + if n.machines[i].IsDefault() { + n.machines = append(append(n.machines[:i], m), n.machines[i:]...) + return m + } + } + n.machines = append(n.machines, m) + return m +} + +func (n *Netrc) insertMachineTokensBeforeDefault(m *Machine) { + newtokens := []*token{m.nametoken} + if m.logintoken.value != "" { + newtokens = append(newtokens, m.logintoken) + } + if m.passtoken.value != "" { + newtokens = append(newtokens, m.passtoken) + } + if m.accounttoken.value != "" { + newtokens = append(newtokens, m.accounttoken) + } + for i := range n.tokens { + if n.tokens[i].kind == tkDefault { + // found the default, now insert tokens before it + n.tokens = append(n.tokens[:i], append(newtokens, n.tokens[i:]...)...) + return + } + } + // didn't find a default, just add the newtokens to the end + n.tokens = append(n.tokens, newtokens...) + return +} + +func (n *Netrc) RemoveMachine(name string) { + n.updateLock.Lock() + defer n.updateLock.Unlock() + + for i := range n.machines { + if n.machines[i] != nil && n.machines[i].Name == name { + m := n.machines[i] + for _, t := range []*token{ + m.nametoken, m.logintoken, m.passtoken, m.accounttoken, + } { + n.removeToken(t) + } + n.machines = append(n.machines[:i], n.machines[i+1:]...) + return + } + } +} + +func (n *Netrc) removeToken(t *token) { + if t != nil { + for i := range n.tokens { + if n.tokens[i] == t { + n.tokens = append(n.tokens[:i], n.tokens[i+1:]...) + return + } + } + } +} + +// Machine contains information about a remote machine. 
+type Machine struct {
+	Name     string
+	Login    string
+	Password string
+	Account  string
+
+	nametoken    *token
+	logintoken   *token
+	passtoken    *token
+	accounttoken *token
+}
+
+// IsDefault returns true if the machine is a "default" token, denoted by an
+// empty name.
+func (m *Machine) IsDefault() bool {
+	return m.Name == ""
+}
+
+// UpdatePassword sets the password for the Machine m.
+func (m *Machine) UpdatePassword(newpass string) {
+	m.Password = newpass
+	updateTokenValue(m.passtoken, newpass)
+}
+
+// UpdateLogin sets the login for the Machine m.
+func (m *Machine) UpdateLogin(newlogin string) {
+	m.Login = newlogin
+	updateTokenValue(m.logintoken, newlogin)
+}
+
+// UpdateAccount sets the account for the Machine m.
+func (m *Machine) UpdateAccount(newaccount string) {
+	m.Account = newaccount
+	updateTokenValue(m.accounttoken, newaccount)
+}
+
+func updateTokenValue(t *token, value string) {
+	oldvalue := t.value
+	t.value = value
+	newraw := make([]byte, len(t.rawvalue))
+	copy(newraw, t.rawvalue)
+	t.rawvalue = append(
+		bytes.TrimSuffix(newraw, []byte(oldvalue)),
+		[]byte(value)...,
+	)
+}
+
+// Macros contains all the macro definitions in a netrc file.
+type Macros map[string]string
+
+type token struct {
+	kind      tkType
+	macroName string
+	value     string
+	rawkind   []byte
+	rawvalue  []byte
+}
+
+// Error represents a netrc file parse error.
+type Error struct {
+	LineNum int    // Line number
+	Msg     string // Error message
+}
+
+// Error returns a string representation of error e.
+func (e *Error) Error() string {
+	return fmt.Sprintf("line %d: %s", e.LineNum, e.Msg)
+}
+
+func (e *Error) BadDefaultOrder() bool {
+	return e.Msg == errBadDefaultOrder
+}
+
+const errBadDefaultOrder = "default token must appear after all machine tokens"
+
+// scanLinesKeepPrefix is a split function for a Scanner that returns each line
+// of text. The returned token may include newlines if they are before the
+// first non-space character. The returned line may be empty. The end-of-line
+// marker is one optional carriage return followed by one mandatory newline. In
+// regular expression notation, it is `\r?\n`. The last non-empty line of
+// input will be returned even if it has no newline.
+func scanLinesKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	if atEOF && len(data) == 0 {
+		return 0, nil, nil
+	}
+	// Skip leading spaces.
+	start := 0
+	for width := 0; start < len(data); start += width {
+		var r rune
+		r, width = utf8.DecodeRune(data[start:])
+		if !unicode.IsSpace(r) {
+			break
+		}
+	}
+	if i := bytes.IndexByte(data[start:], '\n'); i >= 0 {
+		// We have a full newline-terminated line.
+		return start + i, data[0 : start+i], nil
+	}
+	// If we're at EOF, we have a final, non-terminated line. Return it.
+	if atEOF {
+		return len(data), data, nil
+	}
+	// Request more data.
+	return 0, nil, nil
+}
+
+// scanTokensKeepPrefix is a split function for a Scanner that returns each
+// space-separated word of text, with prefixing spaces included. It will never
+// return an empty string. The definition of space is set by unicode.IsSpace.
+//
+// Adapted from bufio.ScanWords().
+func scanTokensKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	// Skip leading spaces.
+ start := 0 + for width := 0; start < len(data); start += width { + var r rune + r, width = utf8.DecodeRune(data[start:]) + if !unicode.IsSpace(r) { + break + } + } + if atEOF && len(data) == 0 || start == len(data) { + return len(data), data, nil + } + if len(data) > start && data[start] == '#' { + return scanLinesKeepPrefix(data, atEOF) + } + // Scan until space, marking end of word. + for width, i := 0, start; i < len(data); i += width { + var r rune + r, width = utf8.DecodeRune(data[i:]) + if unicode.IsSpace(r) { + return i, data[:i], nil + } + } + // If we're at EOF, we have a final, non-empty, non-terminated word. Return it. + if atEOF && len(data) > start { + return len(data), data, nil + } + // Request more data. + return 0, nil, nil +} + +func newToken(rawb []byte) (*token, error) { + _, tkind, err := bufio.ScanWords(rawb, true) + if err != nil { + return nil, err + } + var ok bool + t := token{rawkind: rawb} + t.kind, ok = keywords[string(tkind)] + if !ok { + trimmed := strings.TrimSpace(string(tkind)) + if trimmed == "" { + t.kind = tkWhitespace // whitespace-only, should happen only at EOF + return &t, nil + } + if strings.HasPrefix(trimmed, "#") { + t.kind = tkComment // this is a comment + return &t, nil + } + return &t, fmt.Errorf("keyword expected; got " + string(tkind)) + } + return &t, nil +} + +func scanValue(scanner *bufio.Scanner, pos int) ([]byte, string, int, error) { + if scanner.Scan() { + raw := scanner.Bytes() + pos += bytes.Count(raw, []byte{'\n'}) + return raw, strings.TrimSpace(string(raw)), pos, nil + } + if err := scanner.Err(); err != nil { + return nil, "", pos, &Error{pos, err.Error()} + } + return nil, "", pos, nil +} + +func parse(r io.Reader, pos int) (*Netrc, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + nrc := Netrc{machines: make([]*Machine, 0, 20), macros: make(Macros, 10)} + + defaultSeen := false + var currentMacro *token + var m *Machine + var t *token + scanner := bufio.NewScanner(bytes.NewReader(b)) + scanner.Split(scanTokensKeepPrefix) + + for scanner.Scan() { + rawb := scanner.Bytes() + if len(rawb) == 0 { + break + } + pos += bytes.Count(rawb, []byte{'\n'}) + t, err = newToken(rawb) + if err != nil { + if currentMacro == nil { + return nil, &Error{pos, err.Error()} + } + currentMacro.rawvalue = append(currentMacro.rawvalue, rawb...) 
+			continue
+		}
+
+		if currentMacro != nil && bytes.Contains(rawb, []byte{'\n', '\n'}) {
+			// if macro rawvalue + rawb would contain \n\n, then macro def is over
+			currentMacro.value = strings.TrimLeft(string(currentMacro.rawvalue), "\r\n")
+			nrc.macros[currentMacro.macroName] = currentMacro.value
+			currentMacro = nil
+		}
+
+		switch t.kind {
+		case tkMacdef:
+			if _, t.macroName, pos, err = scanValue(scanner, pos); err != nil {
+				return nil, &Error{pos, err.Error()}
+			}
+			currentMacro = t
+		case tkDefault:
+			if defaultSeen {
+				return nil, &Error{pos, "multiple default token"}
+			}
+			if m != nil {
+				nrc.machines, m = append(nrc.machines, m), nil
+			}
+			m = new(Machine)
+			m.Name = ""
+			defaultSeen = true
+		case tkMachine:
+			if defaultSeen {
+				return nil, &Error{pos, errBadDefaultOrder}
+			}
+			if m != nil {
+				nrc.machines, m = append(nrc.machines, m), nil
+			}
+			m = new(Machine)
+			if t.rawvalue, m.Name, pos, err = scanValue(scanner, pos); err != nil {
+				return nil, &Error{pos, err.Error()}
+			}
+			t.value = m.Name
+			m.nametoken = t
+		case tkLogin:
+			if m == nil || m.Login != "" {
+				return nil, &Error{pos, "unexpected token login"}
+			}
+			if t.rawvalue, m.Login, pos, err = scanValue(scanner, pos); err != nil {
+				return nil, &Error{pos, err.Error()}
+			}
+			t.value = m.Login
+			m.logintoken = t
+		case tkPassword:
+			if m == nil || m.Password != "" {
+				return nil, &Error{pos, "unexpected token password"}
+			}
+			if t.rawvalue, m.Password, pos, err = scanValue(scanner, pos); err != nil {
+				return nil, &Error{pos, err.Error()}
+			}
+			t.value = m.Password
+			m.passtoken = t
+		case tkAccount:
+			if m == nil || m.Account != "" {
+				return nil, &Error{pos, "unexpected token account"}
+			}
+			if t.rawvalue, m.Account, pos, err = scanValue(scanner, pos); err != nil {
+				return nil, &Error{pos, err.Error()}
+			}
+			t.value = m.Account
+			m.accounttoken = t
+		}
+
+		nrc.tokens = append(nrc.tokens, t)
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	if m != nil {
+		nrc.machines, m = append(nrc.machines, m), nil
+	}
+	return &nrc, nil
+}
+
+// ParseFile opens the file at filename and then passes its io.Reader to
+// Parse().
+func ParseFile(filename string) (*Netrc, error) {
+	fd, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer fd.Close()
+	return Parse(fd)
+}
+
+// Parse parses the Reader r as a netrc file and returns the set of
+// machine information and macros defined in it. The ``default'' machine,
+// which is intended to be used when no machine name matches, is identified
+// by an empty machine name. There can be only one ``default'' machine.
+//
+// If there is a parsing error, an Error is returned.
+func Parse(r io.Reader) (*Netrc, error) {
+	return parse(r, 1)
+}
+
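[Reviewer note, not part of the patch: a minimal sketch of how this vendored parser is typically used; the .netrc path and machine name are made-up placeholders.]

```go
package main

import (
	"fmt"
	"log"

	"github.com/bgentry/go-netrc/netrc"
)

func main() {
	// Parse a netrc file, then look up credentials for one machine.
	// FindMachine falls back to the ``default'' entry when no name matches.
	n, err := netrc.ParseFile("/home/user/.netrc") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	if m := n.FindMachine("api.example.com"); m != nil {
		fmt.Println(m.Login, m.Password)
	}
}
```

+// FindMachine parses the netrc file identified by filename and returns the
+// Machine named by name. If a problem occurs parsing the file at filename, an
+// error is returned. If a machine named by name exists, it is returned. If no
+// Machine with name name is found and there is a ``default'' machine, the
+// ``default'' machine is returned. Otherwise, nil is returned.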
+func FindMachine(filename, name string) (m *Machine, err error) { + n, err := ParseFile(filename) + if err != nil { + return nil, err + } + return n.FindMachine(name), nil +} diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." 
+ +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. 
+You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. 
We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
new file mode 100644
index 00000000..85947422
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README.md
@@ -0,0 +1,740 @@
+INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge)
+===
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+[简体中文](README_ZH.md)
+
+## Features
+
+- Load multiple data sources (`[]byte`, file, and `io.ReadCloser`) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+To use a tagged revision:
+
+    go get gopkg.in/ini.v1
+
+To use with latest changes:
+
+    go get github.com/go-ini/ini
+
+Add the `-u` flag to update in the future.
+
+### Testing
+
+If you want to test on your machine, apply the `-t` flag:
+
+    go get -t gopkg.in/ini.v1
+
+Add the `-u` flag to update in the future.
+
+## Getting Started
+
+### Loading from data sources
+
+A **Data Source** is either raw data of type `[]byte`, a file name of type `string`, or an `io.ReadCloser`. You can load **as many data sources as you want**; passing any other type simply returns an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+If you cannot decide how many data sources to load up front, you can still **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+If some files in a list might not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+The cool thing is that when a missing file becomes available and you call the `Reload` method, it is loaded as usual.
+
+#### Ignore case of section and key names
+
+When you do not care about the case of section and key names, you can use `InsensitiveLoad` to force all names to lowercase while parsing.
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 and sec2 are exactly the same section object
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 and key2 are exactly the same key object
+key1, err := cfg.GetKey("Key")
+key2, err := cfg.GetKey("KeY")
+```
+
+#### MySQL-like boolean key
+
+MySQL's configuration file allows keys without values:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+By default, such a key is treated as having a missing value. But if you know you're going to deal with those cases, you can use advanced load options:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+The value of such keys is always `true`, and when you save to a file, they are kept in the same format as they were read.
+
+To generate such keys in your program, you could use `NewBooleanKey`:
+
+```go
+key, err := sec.NewBooleanKey("skip-host-cache")
+```
+
+#### Comment
+
+Note that the following content is treated as comments:
+
+1. Lines beginning with `#` or `;`
+2. Words after `#` or `;`
+3. Words after the section name (i.e. words after `[some section name]`)
+
+If you want to save a value containing `#` or `;`, quote it with ``` ` ``` or ``` """ ```.
+
+### Working with sections
+
+To get a section, you would need to:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+As a shortcut for the default section, just pass an empty string as the name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, the following code could make your life easier:
+
+```go
+section := cfg.Section("section name")
+```
+
+What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
+
+To create a new section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get a list of sections or section names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+The same rule applies to key operations:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To check if a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get a list of keys or key names:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a cloned hash of keys and their corresponding values:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### Working with values
+
+To get a string value:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To validate a key value on the fly:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+	if len(in) == 0 {
+		return "default"
+	}
+	return in
+})
+```
+
+If you do not want any auto-transformation (such as recursive read) for the values, you can get the raw value directly (this way you get much better performance):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check if a raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+To get values as other types:
+
+```go
+// For boolean values:
+// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. + +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Well, I hate continuation lines, how do I disable that? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +Holy crap! + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. + +#### Helper methods of working with values + +To get value with given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. 
+
+To validate that a value is in a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+##### Auto-split values into a slice
+
+To use the type's zero value for invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+To exclude invalid values from the result slice:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+Or to return nothing but an error when there are invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### Save your configuration
+
+Finally, it's time to save your configuration somewhere.
+
+A typical way to save a configuration is to write it to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+Another way is to write to an `io.Writer`:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+By default, spaces are used to align the "=" sign between keys and values. To disable that:
+
+```go
+ini.PrettyFormat = false
+```
+
+## Advanced Usage
+
+### Recursive Values
+
+For all values of keys, there is a special syntax `%(<name>)s`, where `<name>` is the key name in the same section or the default section, and `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+### Parent-child Sections
+
+You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will look it up in the parent section, and keep going until there is no parent section.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+#### Retrieve parent keys available to a child section
+
+```go
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
+```
+
+### Unparseable Sections
+
+Sometimes you have sections that do not contain key-value pairs but raw content. To handle such cases, you can use `LoadOptions.UnparseableSections`:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, []byte(`[COMMENTS]
+<1> This slide has the fuel listed in the wrong units `))
+
+body := cfg.Section("COMMENTS").Body()
+
+/* --- start ---
+<1> This slide has the fuel listed in the wrong units
+------ end --- */
+```
+
+### Auto-increment Key Names
+
+If a key name is `-` in the data source, it is treated as special syntax for an auto-increment key name. The counter starts from 1, and each section keeps its own independent counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### Map To Struct
+
+Want a more object-oriented way to play with INI? Cool.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+	Content string
+	Cities  []string
+}
+
+type Person struct {
+	Name string
+	Age  int `ini:"age"`
+	Male bool
+	Born time.Time
+	Note
+	Created time.Time `ini:"-"`
+}
+
+func main() {
+	cfg, err := ini.Load("path/to/ini")
+	// ...
+	p := new(Person)
+	err = cfg.MapTo(p)
+	// ...
+
+	// Things can be simpler.
+	err = ini.MapTo(p, "path/to/ini")
+	// ...
+
+	// Just map a section? Fine.
+	n := new(Note)
+	err = cfg.Section("Note").MapTo(n)
+	// ...
+}
+```
+
+Can I have a default value for a field? Absolutely.
+
+Assign it before you map to the struct. The value is kept as-is if the key is not present or has the wrong type.
+
+```go
+// ...
+p := &Person{
+	Name: "Joe",
+}
+// ...
+```
+
+It's really cool, but what's the point if you can't give me my file back from the struct?
+
+### Reflect From Struct
+
+Why not?
+
+```go
+type Embeded struct {
+	Dates  []time.Time `delim:"|"`
+	Places []string    `ini:"places,omitempty"`
+	None   []int       `ini:",omitempty"`
+}
+
+type Author struct {
+	Name      string `ini:"NAME"`
+	Male      bool
+	Age       int
+	GPA       float64
+	NeverMind string `ini:"-"`
+	*Embeded
+}
+
+func main() {
+	a := &Author{"Unknwon", true, 21, 2.8, "",
+		&Embeded{
+			[]time.Time{time.Now(), time.Now()},
+			[]string{"HangZhou", "Boston"},
+			[]int{},
+		}}
+	cfg := ini.Empty()
+	err := ini.ReflectFrom(cfg, a)
+	// ...
+}
+```
+
+So, what do I get?
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+places = HangZhou,Boston
+```
+
+#### Name Mapper
+
+To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.
+
+There are 2 built-in name mappers:
+
+- `AllCapsUnderscore`: converts names to the format `ALL_CAPS_UNDERSCORE` before matching section or key names.
+- `TitleUnderscore`: converts names to the format `title_underscore` before matching section or key names.
+
+To use them:
+
+```go
+type Info struct {
+	PackageName string
+}
+
+func main() {
+	err := ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+	// ...
+
+	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+	// ...
+	info := new(Info)
+	cfg.NameMapper = ini.AllCapsUnderscore
+	err = cfg.MapTo(info)
+	// ...
+}
+```
+
+The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
+
+#### Value Mapper
+
+To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:
+
+```go
+type Env struct {
+	Foo string `ini:"foo"`
+}
+
+func main() {
+	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+	cfg.ValueMapper = os.ExpandEnv
+	// ...
+	env := &Env{}
+	err = cfg.Section("env").MapTo(env)
+}
+```
+
+This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
+
+#### Other Notes On Map/Reflect
+
+Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+What if, yes, I'm paranoid, and I want the embedded struct to be in the same section? Well, all roads lead to Rome.
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name  string
+	Child `ini:"Parent"`
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does the `BlockMode` field do?
+
+By default, the library lets you both read and write values, so a lock is needed to keep your data safe. But in cases where you are very sure you will only read data through the library, you can set `cfg.BlockMode = false` to speed up read operations by about **50-70%**.
+
+### Why another INI library?
+
+Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this library is about **10-30%** faster.
+
+To make those changes I had to break the API, so it was safer to start over in a new place, and to start versioning the package through `gopkg.in` at the same time. (PS: the import path is also shorter.)
+
+## License
+
+This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md new file mode 100644 index 00000000..163432db --- /dev/null +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -0,0 +1,727 @@ +本包提供了 Go 语言中读写 INI 文件的功能。 + +## 功能特性 + +- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`) +- 支持递归读取键值 +- 支持读取父子分区 +- 支持读取自增键名 +- 支持读取多行的键值 +- 支持大量辅助方法 +- 支持在读取时直接转换为 Go 语言类型 +- 支持读取和 **写入** 分区和键的注释 +- 轻松操作分区、键值和注释 +- 在保存文件时分区和键值会保持原有的顺序 + +## 下载安装 + +使用一个特定版本: + + go get gopkg.in/ini.v1 + +使用最新版: + + go get github.com/go-ini/ini + +如需更新请添加 `-u` 选项。 + +### 测试安装 + +如果您想要在自己的机器上运行测试,请使用 `-t` 标记: + + go get -t gopkg.in/ini.v1 + +如需更新请添加 `-u` 选项。 + +## 开始使用 + +### 从数据源加载 + +一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) +``` + +或者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误): + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。 + +#### 忽略键名的大小写 + +有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写: + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 和 sec2 指向同一个分区对象 +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 和 key2 指向同一个键对象 +key1, err := cfg.GetKey("Key") +key2, err := cfg.GetKey("KeY") +``` + +#### 类似 MySQL 配置中的布尔值键 + +MySQL 的配置文件中会出现没有具体值的布尔类型的键: + +```ini +[mysqld] +... +skip-host-cache +skip-name-resolve +``` + +默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理: + +```go +cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +``` + +这些键的值永远为 `true`,且在保存到文件时也只会输出键名。 + +如果您想要通过程序来生成此类键,则可以使用 `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + +#### 关于注释 + +下述几种情况的内容将被视为注释: + +1. 所有以 `#` 或 `;` 开头的行 +2. 所有在 `#` 或 `;` 之后的内容 +3. 
分区标签后的文字 (即 `[分区名]` 之后的内容) + +如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。 + +### 操作分区(Section) + +获取指定分区: + +```go +section, err := cfg.GetSection("section name") +``` + +如果您想要获取默认分区,则可以用空字符串代替分区名: + +```go +section, err := cfg.GetSection("") +``` + +当您非常确定某个分区是存在的,可以使用以下简便方法: + +```go +section := cfg.Section("section name") +``` + +如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 + +创建一个分区: + +```go +err := cfg.NewSection("new section") +``` + +获取所有分区对象或名称: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### 操作键(Key) + +获取某个分区下的键: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +和分区一样,您也可以直接获取键而忽略错误处理: + +```go +key := cfg.Section("").Key("key name") +``` + +判断某个键是否存在: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +创建一个新的键: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +获取分区下的所有键或键名: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获取分区下的所有键值对的克隆: + +```go +hash := cfg.Section("").KeysHash() +``` + +### 操作键值(Value) + +获取一个类型为字符串(string)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获取值的同时通过自定义函数进行处理验证: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +判断某个原值是否存在: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +获取其它类型的值: + +```go +// 布尔值的规则: +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, +// 当键不存在或者转换失败时,则会直接返回该默认值。 +// 但是,MustString 方法必须传递一个默认值。 + +v = cfg.Seciont("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +如果我的值有好多行怎么办? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +嗯哼?小 case! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直是小菜一碟! 
+ +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +哇靠给力啊! + +需要注意的是,值两侧的单引号会被自动剔除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就是全部了?哈哈,当然不是。 + +#### 操作键值的辅助方法 + +获取键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 + +验证获取的值是否在指定范围内: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### 自动分割键值到切片(slice) + +当存在无效输入时,使用零值代替: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +从结果切片中剔除无效输入: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +当存在无效输入时,直接返回错误: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### 保存配置 + +终于到了这个时刻,是时候保存一下配置了。 + +比较原始的做法是输出配置到某个文件: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: + +```go +// ... 
+cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能: + +```go +ini.PrettyFormat = false +``` + +## 高级用法 + +### 递归读取键值 + +在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### 读取父子分区 + +您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### 获取上级父分区下的所有键名 + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### 无法解析的分区 + +如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + +### 读取自增键名 + +如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### 映射到结构 + +想要使用更加面向对象的方式玩转 INI 吗?好主意。 + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // 一切竟可以如此的简单。 + err = ini.MapTo(p, "path/to/ini") + // ... + + // 嗯哼?只需要映射一个分区吗? + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? + +### 从结构反射 + +可是,我有说不能吗? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... 
+} +``` + +瞧瞧,奇迹发生了。 + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +places = HangZhou,Boston +``` + +#### 名称映射器(Name Mapper) + +为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 + +目前有 2 款内置的映射器: + +- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 +- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 + +使用方法: + +```go +type Info struct{ + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 + +#### 值映射器(Value Mapper) + +值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。 + +#### 映射/反射的其它说明 + +任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## 获取帮助 + +- [API 文档](https://gowalker.org/gopkg.in/ini.v1) +- [创建工单](https://github.com/go-ini/ini/issues/new) + +## 常见问题 + +### 字段 `BlockMode` 是什么? + +默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 + +### 为什么要写另一个 INI 解析库? + +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 + +为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go new file mode 100644 index 00000000..5211d5ab --- /dev/null +++ b/vendor/github.com/go-ini/ini/ini.go @@ -0,0 +1,556 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + // Name for default section. You can use this constant or the string literal. + // In most of cases, an empty string is all you need to access the section. 
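+	// Functions such as GetSection and DeleteSection substitute this name when they are given an empty string.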
+ DEFAULT_SECTION = "DEFAULT" + + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 99 + _VERSION = "1.27.0" +) + +// Version returns current package version literal. +func Version() string { + return _VERSION +} + +var ( + // Delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows + // at package init time. + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Indicate whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. + PrettyFormat = true + + // Explicitly write DEFAULT section header + DefaultHeader = false +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is an interface that returns object which can be read and closed. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(s.data)), nil +} + +// sourceReadCloser represents an input stream with Close method. +type sourceReadCloser struct { + reader io.ReadCloser +} + +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil +} + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + options LoadOptions + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, opts LoadOptions) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + options: opts, + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. + Insensitive bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. 
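+	// A continuation line is one whose predecessor ends with a trailing '\'.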
+	IgnoreContinuation bool
+	// IgnoreInlineComment indicates whether to ignore comments at the end of a value, treating them as part of the value.
+	IgnoreInlineComment bool
+	// AllowBooleanKeys indicates whether to allow boolean-type keys or treat them as keys with missing values.
+	// Such keys are mostly used in my.cnf.
+	AllowBooleanKeys bool
+	// AllowShadows indicates whether to keep track of keys with the same name under the same section.
+	AllowShadows bool
+	// Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
+	// conform to key/value pairs. Specify the names of those blocks here.
+	UnparseableSections []string
+}
+
+// LoadSources allows the caller to apply customized options for loading from data source(s).
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+	sources := make([]dataSource, len(others)+1)
+	sources[0], err = parseDataSource(source)
+	if err != nil {
+		return nil, err
+	}
+	for i := range others {
+		sources[i+1], err = parseDataSource(others[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	f := newFile(sources, opts)
+	if err = f.Reload(); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be a mix of file names (string) and raw data ([]byte).
+// It returns an error if the list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly the same functionality as the Load function,
+// except it ignores nonexistent files instead of returning an error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly the same functionality as the Load function,
+// except it forces all section and key names to lower case.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly the same functionality as the Load function,
+// except it allows having shadow keys.
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+	// Ignore error here, we're sure our data is good.
+	f, _ := Load([]byte(""))
+	return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+	if len(name) == 0 {
+		return nil, errors.New("error creating new section: empty section name")
+	} else if f.options.Insensitive && name != DEFAULT_SECTION {
+		name = strings.ToLower(name)
+	}
+
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	if inSlice(name, f.sectionList) {
+		return f.sections[name], nil
+	}
+
+	f.sectionList = append(f.sectionList, name)
+	f.sections[name] = newSection(f, name)
+	return f.sections[name], nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+	section, err := f.NewSection(name)
+	if err != nil {
+		return nil, err
+	}
+
+	section.isRawSection = true
+	section.rawBody = body
+	return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+	for _, name := range names {
+		if _, err = f.NewSection(name); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetSection returns a section by the given name.
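+// It returns an error if a section with that name does not exist.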
+func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } else if f.options.Insensitive { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("section '%s' does not exist", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. +func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// ChildSections returns a list of child sections of given section name. +func (f *File) ChildSections(name string) []*Section { + return f.Section(name).ChildSections() +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. +func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + delete(f.sections, name) + return + } + } +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. 
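+	// Nothing is flushed to w until the entire file has been rendered into the buffer.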
+ buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } + if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { + return 0, err + } + } + + if i > 0 || DefaultHeader { + if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return 0, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + if sec.isRawSection { + if _, err = buf.WriteString(sec.rawBody); err != nil { + return 0, err + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modifed if they contain certain characters so + // we need to take that into account in our calculation. + alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.ContainsAny(kname, "\"=:") { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KEY_LIST: + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } + if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { + return 0, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.ContainsAny(kname, "\"=:"): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + for _, val := range key.ValueWithShadows() { + if _, err = buf.WriteString(kname); err != nil { + return 0, err + } + + if key.isBooleanType { + if kname != sec.keyList[len(sec.keyList)-1] { + buf.WriteString(LineBreak) + } + continue KEY_LIST + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { + return 0, err + } + } + } + + // Put a line between sections + if _, err = buf.WriteString(LineBreak); err != nil { + return 0, err + } + } + + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" + defer os.Remove(tmpPath) + + fw, err := os.Create(tmpPath) + if err != nil { + return err + } + + if _, err = f.WriteToIndent(fw, indent); err != nil { + fw.Close() + return err + } + fw.Close() + + // Remove old file and rename the new one. 
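+	// If the rename fails, the deferred os.Remove above still cleans up the temporary file.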
+ os.Remove(filename) + return os.Rename(tmpPath, filename) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 00000000..6c0b1074 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,361 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + p.buf.Read(mask) + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + p.buf.Read(mask) + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
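+	// A name may be quoted with `, ", or """ so that it can contain the delimiter characters '=' and ':'.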
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. +func hasSurroundedQuote(in string, quote byte) bool { + return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + + // Check continuation lines when desired + if !ignoreContinuation && line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !ignoreInlineComment { + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. 
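+// It reads the input line by line, tracking the current section and buffering
+// comments until they can be attached to the section or key that follows them.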
+func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + section, _ := f.NewSection(DEFAULT_SECTION) + + var line []byte + var inUnparseableSection bool + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 + closeIdx := bytes.LastIndex(line, []byte("]")) + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset aotu-counter and comments + p.comment.Reset() + p.count = 1 + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue + } + + kname, offset, err := readKeyName(line) + if err != nil { + // Treat as boolean key when desired, and whole line is key name. + if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { + kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + if err != nil { + return err + } + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go new file mode 100644 index 00000000..031c78b8 --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct.go @@ -0,0 +1,450 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. + TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setSliceWithProperType sets proper values to slice based on its type. +func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { + var strs []string + if allowShadow { + strs = key.StringsWithShadows(delim) + } else { + strs = key.Strings(delim) + } + + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals, _ = key.parseInts(strs, true, false) + case reflect.Int64: + vals, _ = key.parseInt64s(strs, true, false) + case reflect.Uint: + vals, _ = key.parseUints(strs, true, false) + case reflect.Uint64: + vals, _ = key.parseUint64s(strs, true, false) + case reflect.Float64: + vals, _ = key.parseFloat64s(strs, true, false) + case reflectTime: + vals, _ = key.parseTimesFormat(time.RFC3339, strs, true, false) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to strcut. 
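+// Only parse failures are swallowed; an unsupported field type still returns an error.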
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
+	switch t.Kind() {
+	case reflect.String:
+		if len(key.String()) == 0 {
+			return nil
+		}
+		field.SetString(key.String())
+	case reflect.Bool:
+		boolVal, err := key.Bool()
+		if err != nil {
+			return nil
+		}
+		field.SetBool(boolVal)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		durationVal, err := key.Duration()
+		// Skip zero value
+		if err == nil && int(durationVal) > 0 {
+			field.Set(reflect.ValueOf(durationVal))
+			return nil
+		}
+
+		intVal, err := key.Int64()
+		if err != nil || intVal == 0 {
+			return nil
+		}
+		field.SetInt(intVal)
+	// byte is an alias for uint8, so supporting uint8 breaks support for byte
+	case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		durationVal, err := key.Duration()
+		// Skip zero value
+		if err == nil && int(durationVal) > 0 {
+			field.Set(reflect.ValueOf(durationVal))
+			return nil
+		}
+
+		uintVal, err := key.Uint64()
+		if err != nil {
+			return nil
+		}
+		field.SetUint(uintVal)
+
+	case reflect.Float32, reflect.Float64:
+		floatVal, err := key.Float64()
+		if err != nil {
+			return nil
+		}
+		field.SetFloat(floatVal)
+	case reflectTime:
+		timeVal, err := key.Time()
+		if err != nil {
+			return nil
+		}
+		field.Set(reflect.ValueOf(timeVal))
+	case reflect.Slice:
+		return setSliceWithProperType(key, field, delim, allowShadow)
+	default:
+		return fmt.Errorf("unsupported type '%s'", t)
+	}
+	return nil
+}
+
+func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
+	opts := strings.SplitN(tag, ",", 3)
+	rawName = opts[0]
+	if len(opts) > 1 {
+		omitEmpty = opts[1] == "omitempty"
+	}
+	if len(opts) > 2 {
+		allowShadow = opts[2] == "allowshadow"
+	}
+	return rawName, omitEmpty, allowShadow
+}
+
+func (s *Section) mapTo(val reflect.Value) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			continue
+		}
+
+		rawName, _, allowShadow := parseTagOptions(tag)
+		fieldName := s.parseFieldName(tpField.Name, rawName)
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+		isStruct := tpField.Type.Kind() == reflect.Struct
+		if isAnonymous {
+			field.Set(reflect.New(tpField.Type.Elem()))
+		}
+
+		if isAnonymous || isStruct {
+			if sec, err := s.f.GetSection(fieldName); err == nil {
+				if err = sec.mapTo(field); err != nil {
+					return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+				}
+				continue
+			}
+		}
+
+		if key, err := s.GetKey(fieldName); err == nil {
+			delim := parseDelim(tpField.Tag.Get("delim"))
+			if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
+				return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+			}
+		}
+	}
+	return nil
+}
+
+// MapTo maps the section to the given struct.
+func (s *Section) MapTo(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot map to non-pointer struct")
+	}
+
+	return s.mapTo(val)
+}
+
+// MapTo maps the file to the given struct.
+func (f *File) MapTo(v interface{}) error {
+	return f.Section("").MapTo(v)
+}
+
+// MapToWithMapper maps data sources to the given struct with a name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+	cfg, err := Load(source, others...)
+	if err != nil {
+		return err
+	}
+	cfg.NameMapper = mapper
+	return cfg.MapTo(v)
+}
+
+// MapTo maps data sources to the given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+	return MapToWithMapper(v, nil, source, others...)
+}
+
+// reflectSliceWithProperType does the opposite of setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
+	slice := field.Slice(0, field.Len())
+	if field.Len() == 0 {
+		return nil
+	}
+
+	var buf bytes.Buffer
+	sliceOf := field.Type().Elem().Kind()
+	for i := 0; i < field.Len(); i++ {
+		switch sliceOf {
+		case reflect.String:
+			buf.WriteString(slice.Index(i).String())
+		case reflect.Int, reflect.Int64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+		case reflect.Uint, reflect.Uint64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+		case reflect.Float64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+		case reflectTime:
+			buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+		default:
+			return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+		}
+		buf.WriteString(delim)
+	}
+	key.SetValue(buf.String()[:buf.Len()-1])
+	return nil
+}
+
+// reflectWithProperType does the opposite of setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+	switch t.Kind() {
+	case reflect.String:
+		key.SetValue(field.String())
+	case reflect.Bool:
+		key.SetValue(fmt.Sprint(field.Bool()))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		key.SetValue(fmt.Sprint(field.Int()))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		key.SetValue(fmt.Sprint(field.Uint()))
+	case reflect.Float32, reflect.Float64:
+		key.SetValue(fmt.Sprint(field.Float()))
+	case reflectTime:
+		key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
+	case reflect.Slice:
+		return reflectSliceWithProperType(key, field, delim)
+	default:
+		return fmt.Errorf("unsupported type '%s'", t)
+	}
+	return nil
+}
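+
+// Example (editor's sketch, not part of the upstream file; it assumes a
+// []byte data source, which go-ini's Load accepts):
+//
+//	type Server struct {
+//		Host string `ini:"host"`
+//		Port int    `ini:"port"`
+//	}
+//
+//	cfg, err := Load([]byte("host = example.com\nport = 8080"))
+//	if err == nil {
+//		var srv Server
+//		err = cfg.MapTo(&srv) // srv.Host == "example.com", srv.Port == 8080
+//	}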
+// CR: copied from encoding/json/encode.go with modifications for time.Time support.
+// TODO: add more test coverage.
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflectTime:
+		return v.Interface().(time.Time).IsZero()
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			continue
+		}
+
+		opts := strings.SplitN(tag, ",", 2)
+		if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
+			continue
+		}
+
+		fieldName := s.parseFieldName(tpField.Name, opts[0])
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+			(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+			// Note: The only possible error here is that the section doesn't exist.
+			sec, err := s.f.GetSection(fieldName)
+			if err != nil {
+				// Note: fieldName can never be empty here, ignore error.
+				sec, _ = s.f.NewSection(fieldName)
+			}
+			if err = sec.reflectFrom(field); err != nil {
+				return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+			}
+			continue
+		}
+
+		// Note: Same reason as the section above.
+		key, err := s.GetKey(fieldName)
+		if err != nil {
+			key, _ = s.NewKey(fieldName, "")
+		}
+		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+			return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+		}
+	}
+	return nil
+}
+
+// ReflectFrom reflects the section from the given struct.
+func (s *Section) ReflectFrom(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot reflect from non-pointer struct")
+	}
+
+	return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects the file from the given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+	return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from the given struct with a name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+	cfg.NameMapper = mapper
+	return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from the given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+	return ReflectFromWithMapper(cfg, v, nil)
+}
diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+   means each individual or legal entity that creates, contributes to the
+   creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+   means the combination of the Contributions of others (if any) used by a
+   Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+   means Covered Software of a particular Contributor.
+
+1.4.
“Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md
new file mode 100644
index 00000000..1c95f597
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/README.md
@@ -0,0 +1,89 @@
+# errwrap
+
+`errwrap` is a package for Go that formalizes the pattern of wrapping errors
+and checking if an error contains another error.
+
+There is a common pattern in Go of taking a returned `error` value and
+then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
+with this pattern is that you completely lose the original `error` structure.
+
+Arguably the _correct_ approach is that you should make a custom structure
+implementing the `error` interface, and have the original error as a field
+on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
+This is a good approach, but you have to know the entire chain of possible
+rewrapping that happens, when you might just care about one.
+
+`errwrap` formalizes this pattern (it doesn't matter what approach you use
+above) by giving a single interface for wrapping errors, checking if a specific
+error is wrapped, and extracting that error.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/errwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/errwrap
+
+## Usage
+
+#### Basic Usage
+
+Below is a very basic example of its usage:
+
+```go
+// A function that always returns an error, but wraps it, like a real
+// function might.
+func tryOpen() error {
+	_, err := os.Open("/i/dont/exist")
+	if err != nil {
+		return errwrap.Wrapf("Doesn't exist: {{err}}", err)
+	}
+
+	return nil
+}
+
+func main() {
+	err := tryOpen()
+
+	// We can use the Contains helpers to check if an error contains
+	// another error. It is safe to do this with a nil error, or with
+	// an error that doesn't even use the errwrap package.
+	if errwrap.Contains(err, "does not exist") {
+		// Do something
+	}
+	if errwrap.ContainsType(err, new(os.PathError)) {
+		// Do something
+	}
+
+	// Or we can use the associated `Get` functions to just extract
+	// a specific error. This would return nil if that specific error doesn't
+	// exist.
+	perr := errwrap.GetType(err, new(os.PathError))
+}
+```
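+
+The package also exposes `Walk` (see `errwrap.go` below), which visits every
+error in the chain. A minimal sketch, not from the upstream docs, assuming the
+standard `log` package is imported:
+
+```go
+errwrap.Walk(err, func(e error) {
+	log.Printf("cause: %s", e)
+})
+```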
+
+#### Custom Types
+
+If you're already making custom types that properly wrap errors, then
+you can get all the functionality of `errwrap.Contains` and such by
+implementing the `Wrapper` interface with just one function. Example:
+
+```go
+type AppError struct {
+	Code ErrorCode
+	Err  error
+}
+
+func (e *AppError) WrappedErrors() []error {
+	return []error{e.Err}
+}
+```
+
+Now this works:
+
+```go
+err := &AppError{Err: fmt.Errorf("an error")}
+if errwrap.ContainsType(err, fmt.Errorf("")) {
+	// This will work!
+}
+```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644
index 00000000..a733bef1
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -0,0 +1,169 @@
+// Package errwrap implements methods to formalize error wrapping in Go.
+//
+// All of the top-level functions that take an `error` are built to be able
+// to take any error, not just wrapped errors. This allows you to use errwrap
+// without having to type-check and type-cast everywhere.
+package errwrap
+
+import (
+	"errors"
+	"reflect"
+	"strings"
+)
+
+// WalkFunc is the callback called for Walk.
+type WalkFunc func(error)
+
+// Wrapper is an interface that can be implemented by custom types to
+// have all the Contains, Get, etc. functions in errwrap work.
+//
+// When Walk reaches a Wrapper, it will call the callback for every
+// wrapped error in addition to the wrapper itself. Since all the top-level
+// functions in errwrap use Walk, this means that all those functions work
+// with your custom type.
+type Wrapper interface {
+	WrappedErrors() []error
+}
+
+// Wrap defines that outer wraps inner, returning an error type that
+// can be cleanly used with the other methods in this package, such as
+// Contains, GetAll, etc.
+//
+// This function won't modify the error message at all (the outer message
+// will be used).
+func Wrap(outer, inner error) error {
+	return &wrappedError{
+		Outer: outer,
+		Inner: inner,
+	}
+}
+
+// Wrapf wraps an error with a formatting message. This is similar to using
+// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
+// errors, you should replace it with this.
+//
+// format is the format of the error message. The string '{{err}}' will
+// be replaced with the original error message.
+func Wrapf(format string, err error) error {
+	outerMsg := ""
+	if err != nil {
+		outerMsg = err.Error()
+	}
+
+	outer := errors.New(strings.Replace(
+		format, "{{err}}", outerMsg, -1))
+
+	return Wrap(outer, err)
+}
+
+// Contains checks if the given error contains an error with the
+// message msg. If err is not a wrapped error, this will always return
+// false unless the error itself happens to match this msg.
+func Contains(err error, msg string) bool {
+	return len(GetAll(err, msg)) > 0
+}
+
+// ContainsType checks if the given error contains an error with
+// the same concrete type as v. If err is not a wrapped error, this will
+// check the err itself.
+func ContainsType(err error, v interface{}) bool {
+	return len(GetAllType(err, v)) > 0
+}
+
+// Get is the same as GetAll but returns the deepest matching error.
+func Get(err error, msg string) error {
+	es := GetAll(err, msg)
+	if len(es) > 0 {
+		return es[len(es)-1]
+	}
+
+	return nil
+}
+
+// GetType is the same as GetAllType but returns the deepest matching error.
+func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. +func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} diff --git a/vendor/github.com/hashicorp/go-getter/LICENSE b/vendor/github.com/hashicorp/go-getter/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. 
“Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. 
You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md
new file mode 100644
index 00000000..e651a34a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/README.md
@@ -0,0 +1,257 @@
+# go-getter
+
+[![Build Status](http://img.shields.io/travis/hashicorp/go-getter.svg?style=flat-square)][travis]
+[![Build status](https://ci.appveyor.com/api/projects/status/ulq3qr43n62croyq/branch/master?svg=true)][appveyor]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: http://travis-ci.org/hashicorp/go-getter
+[godocs]: http://godoc.org/github.com/hashicorp/go-getter
+[appveyor]: https://ci.appveyor.com/project/hashicorp/go-getter/branch/master
+
+go-getter is a library for Go (golang) for downloading files or directories
+from various sources using a URL as the primary form of input.
+
+The power of this library is its flexibility: it can download from a number
+of different sources (file paths, Git, HTTP, Mercurial, etc.) using a single
+string as input. This relieves the implementer of the burden of knowing how
+to download from each kind of source.
+
+The concept of a _detector_ automatically turns invalid URLs into proper
+URLs. For example: "github.com/hashicorp/go-getter" would turn into a
+Git URL. Or "./foo" would turn into a file URL. These are extensible.
+
+This library is used by [Terraform](https://terraform.io) for
+downloading modules, [Otto](https://ottoproject.io) for dependencies and
+Appfile imports, and [Nomad](https://nomadproject.io) for downloading
+binaries.
+
+## Installation and Usage
+
+Package documentation can be found on
+[GoDoc](http://godoc.org/github.com/hashicorp/go-getter).
+
+Installation can be done with a normal `go get`:
+
+```
+$ go get github.com/hashicorp/go-getter
+```
+
+go-getter also has a command you can use to test URL strings:
+
+```
+$ go install github.com/hashicorp/go-getter/cmd/go-getter
+...
+
+$ go-getter github.com/foo/bar ./foo
+...
+```
+
+The command is useful for verifying URL structures.
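+
+Used as a library, a minimal sketch looks like this (an editor's
+illustration, not from the upstream docs; it only uses the `Client` type,
+`ClientModeDir`, and `Get` shown in `client.go` later in this patch):
+
+```go
+package main
+
+import (
+	"log"
+
+	getter "github.com/hashicorp/go-getter"
+)
+
+func main() {
+	// Download a repository into ./dst; the GitHub detector expands the
+	// short form into a full Git URL before the git getter runs.
+	c := &getter.Client{
+		Src:  "github.com/hashicorp/go-getter",
+		Dst:  "dst",
+		Pwd:  ".",
+		Mode: getter.ClientModeDir,
+	}
+	if err := c.Get(); err != nil {
+		log.Fatal(err)
+	}
+}
+```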
+
+## URL Format
+
+go-getter uses a single string URL as input to download from a variety of
+protocols. go-getter has various "tricks" with this URL to do certain things.
+This section documents the URL format.
+
+### Supported Protocols and Detectors
+
+**Protocols** are used to download files/directories using a specific
+mechanism. Example protocols are Git and HTTP.
+
+**Detectors** are used to transform a valid or invalid URL into another
+URL if it matches a certain pattern. Example: "github.com/user/repo" is
+automatically transformed into a fully valid Git URL. This allows go-getter
+to be very user friendly.
+
+go-getter out of the box supports the following protocols. Additional protocols
+can be augmented at runtime by implementing the `Getter` interface.
+
+  * Local files
+  * Git
+  * Mercurial
+  * HTTP
+  * Amazon S3
+
+In addition to the above protocols, go-getter has what are called "detectors."
+These take a URL and attempt to automatically choose the best protocol for
+it, which might involve even changing the protocol. The following detection
+is built-in by default:
+
+  * File paths such as "./foo" are automatically changed to absolute
+    file URLs.
+  * GitHub URLs, such as "github.com/mitchellh/vagrant", are automatically
+    changed to Git protocol over HTTP.
+  * BitBucket URLs, such as "bitbucket.org/mitchellh/vagrant", are automatically
+    changed to a Git or Mercurial protocol using the BitBucket API.
+
+### Forced Protocol
+
+In some cases, the protocol to use is ambiguous depending on the source
+URL. For example, "http://github.com/mitchellh/vagrant.git" could reference
+an HTTP URL or a Git URL. Forced protocol syntax is used to disambiguate this
+URL.
+
+A protocol can be forced by prefixing the URL with the protocol followed
+by a double colon. For example: `git::http://github.com/mitchellh/vagrant.git`
+would download the given HTTP URL using the Git protocol.
+
+Forced protocols will also override any detectors.
+
+In the absence of a forced protocol, detectors may be run on the URL,
+transforming the protocol anyway. The above example would've used the Git
+protocol either way since the Git detector would've detected it was a
+GitHub URL.
+
+### Protocol-Specific Options
+
+Each protocol can support protocol-specific options to configure that
+protocol. For example, the `git` protocol supports specifying a `ref`
+query parameter that tells it what ref to checkout for that Git
+repository.
+
+The options are specified as query parameters on the URL (or URL-like string)
+given to go-getter. Using the Git example above, the URL below is a valid
+input to go-getter:
+
+    github.com/hashicorp/go-getter?ref=abcd1234
+
+The protocol-specific options are documented below the URL format
+section. But because they are part of the URL, we point it out here so
+you know they exist.
+
+### Checksumming
+
+For file downloads of any protocol, go-getter can automatically verify
+a checksum for you. Note that checksumming only works for downloading files,
+not directories, but checksumming will work for any protocol.
+
+To checksum a file, append a `checksum` query parameter to the URL.
+The parameter value should be in the format of `type:value`, where
+type is "md5", "sha1", "sha256", or "sha512". The "value" should be
+the actual checksum value. go-getter will parse out this query parameter
+automatically and use it to verify the checksum. An example URL
+is shown below:
+
+```
+./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21
+```
+
+The checksum query parameter is never sent to the backend protocol
+implementation. It is used at a higher level by go-getter itself.
+
+### Unarchiving
+
+go-getter will automatically unarchive files into a file or directory
+based on the extension of the file being requested (over any protocol).
+This works for both file and directory downloads.
+
+go-getter looks for an `archive` query parameter to specify the format of
+the archive. If this isn't specified, go-getter will use the extension of
+the path to see if it appears archived. Unarchiving can be explicitly
+disabled by setting the `archive` query parameter to `false`.
+
+The following archive formats are supported:
+
+  * `tar.gz` and `tgz`
+  * `tar.bz2` and `tbz2`
+  * `zip`
+  * `gz`
+  * `bz2`
+
+For example:
+
+```
+./foo.zip
+```
+
+This will automatically be inferred to be a ZIP file and will be extracted.
+You can also be explicit about the archive type:
+
+```
+./some/other/path?archive=zip
+```
+
+And finally, you can disable archiving completely:
+
+```
+./some/path?archive=false
+```
+
+You can combine unarchiving with the other features of go-getter such
+as checksumming; an illustrative combination follows below.
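+
+An editor's sketch of such a combination (the md5 value reuses the
+checksumming example above):
+
+```
+./module.tar.gz?archive=tgz&checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21
+```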
+
+The special `archive` query parameter will be removed from the URL before
+going to the final protocol downloader.
+
+## Protocol-Specific Options
+
+This section documents the protocol-specific options that can be specified
+for go-getter. These options should be appended to the input as normal query
+parameters. Depending on the usage of go-getter, applications may provide
+alternate ways of inputting options. For example, [Nomad](https://www.nomadproject.io)
+provides a nice options block for specifying options rather than in the URL.
+
+## General (All Protocols)
+
+The options below are available to all protocols:
+
+  * `archive` - The archive format to use to unarchive this file, or "" (empty
+    string) to disable unarchiving. For more details, see the complete section
+    on archive support above.
+
+  * `checksum` - Checksum to verify the downloaded file or archive. See
+    the entire section on checksumming above for format and more details.
+
+### Local Files (`file`)
+
+None
+
+### Git (`git`)
+
+  * `ref` - The Git ref to checkout. This is a ref, so it can point to
+    a commit SHA, a branch name, etc. If it is a named ref such as a branch
+    name, go-getter will update it to the latest on each get.
+
+  * `sshkey` - An SSH private key to use during clones. The provided key must
+    be a base64-encoded string. For example, to generate a suitable `sshkey`
+    from a private key file on disk, you would run `base64 -w0 <file>`.
+
+    **Note**: Git 2.3+ is required to use this feature.
+
+### Mercurial (`hg`)
+
+  * `rev` - The Mercurial revision to checkout.
+
+### HTTP (`http`)
+
+#### Basic Authentication
+
+To use HTTP basic authentication with go-getter, simply prepend `username:password@` to the
+hostname in the URL such as `https://Aladdin:OpenSesame@www.example.com/index.html`. All special
+characters, including the username and password, must be URL encoded.
+
+### S3 (`s3`)
+
+S3 takes various access configurations in the URL. Note that it will also
+read these from standard AWS environment variables if they're set. If
+the query parameters are present, these take priority.
+
+  * `aws_access_key_id` - AWS access key.
+  * `aws_access_key_secret` - AWS access key secret.
+  * `aws_access_token` - AWS access token if this is being used.
+
+#### Using IAM Instance Profiles with S3
+
+If you use go-getter and want to use an EC2 IAM Instance Profile to avoid
+using credentials, then just omit these; the profile, if available, will
+be used automatically.
+
+#### S3 Bucket Examples
+
+S3 has several addressing schemes used to reference your bucket. These are
+listed here: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
+
+Some examples for these addressing schemes:
+- s3::https://s3.amazonaws.com/bucket/foo
+- s3::https://s3-eu-west-1.amazonaws.com/bucket/foo
+- bucket.s3.amazonaws.com/foo
+- bucket.s3-eu-west-1.amazonaws.com/foo/bar
+
diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/appveyor.yml
new file mode 100644
index 00000000..159dad4d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/appveyor.yml
@@ -0,0 +1,16 @@
+version: "build-{branch}-{build}"
+image: Visual Studio 2015
+clone_folder: c:\gopath\github.com\hashicorp\go-getter
+environment:
+  GOPATH: c:\gopath
+install:
+- cmd: >-
+    echo %Path%
+
+    go version
+
+    go env
+
+    go get -d -v -t ./...
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go
new file mode 100644
index 00000000..876812a0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/client.go
@@ -0,0 +1,335 @@
+package getter
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/sha1"
+	"crypto/sha256"
+	"crypto/sha512"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	urlhelper "github.com/hashicorp/go-getter/helper/url"
+)
+
+// Client is a client for downloading things.
+//
+// Top-level functions such as Get are shortcuts for interacting with a client.
+// Using a client directly allows more fine-grained control over how downloading
+// is done, as well as customizing the protocols supported.
+type Client struct {
+	// Src is the source URL to get.
+	Src string
+
+	// Dst is the path to save the downloaded thing as. If Dir is set to
+	// true, then this should be a directory. If the directory doesn't exist,
+	// it will be created for you.
+	Dst string
+
+	// Pwd is the working directory for detection. If this isn't set, some
+	// detection may fail. Client will not default pwd to the current
+	// working directory for security reasons.
+	Pwd string
+
+	// Mode is the method of download the client will use. See ClientMode
+	// for documentation.
+	Mode ClientMode
+
+	// Detectors is the list of detectors that are tried on the source.
+	// If this is nil, then the default Detectors will be used.
+	Detectors []Detector
+
+	// Decompressors is the map of decompressors supported by this client.
+	// If this is nil, then the default value is the Decompressors global.
+	Decompressors map[string]Decompressor
+
+	// Getters is the map of protocols supported by this client. If this
+	// is nil, then the default Getters variable will be used.
+	Getters map[string]Getter
+
+	// Dir, if true, tells the Client it is downloading a directory (versus
+	// a single file). This distinction is necessary since filenames and
+	// directory names follow the same format so disambiguating is impossible
+	// without knowing ahead of time.
+	//
+	// WARNING: deprecated. If Mode is set, that will take precedence.
+	Dir bool
+}
+
+// Get downloads the configured source to the destination.
+func (c *Client) Get() error {
+	// Store this locally since there are cases where we swap it
+	mode := c.Mode
+	if mode == ClientModeInvalid {
+		if c.Dir {
+			mode = ClientModeDir
+		} else {
+			mode = ClientModeFile
+		}
+	}
+
+	// Default decompressor value
+	decompressors := c.Decompressors
+	if decompressors == nil {
+		decompressors = Decompressors
+	}
+
+	// Detect the URL. This is safe if it is already detected.
+	detectors := c.Detectors
+	if detectors == nil {
+		detectors = Detectors
+	}
+	src, err := Detect(c.Src, c.Pwd, detectors)
+	if err != nil {
+		return err
+	}
+
+	// Determine if we have a forced protocol, i.e. "git::http://..."
+	force, src := getForcedGetter(src)
+
+	// If there is a subdir component, then we download the root separately
+	// and then copy over the proper subdir.
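+	// For example, a source of "github.com/foo/bar//baz" downloads the
+	// whole "bar" repository into a temporary directory and then copies
+	// only the "baz" subdirectory into the real destination (names are
+	// illustrative).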
+	var realDst string
+	dst := c.Dst
+	src, subDir := SourceDirSubdir(src)
+	if subDir != "" {
+		tmpDir, err := ioutil.TempDir("", "tf")
+		if err != nil {
+			return err
+		}
+		if err := os.RemoveAll(tmpDir); err != nil {
+			return err
+		}
+		defer os.RemoveAll(tmpDir)
+
+		realDst = dst
+		dst = tmpDir
+	}
+
+	u, err := urlhelper.Parse(src)
+	if err != nil {
+		return err
+	}
+	if force == "" {
+		force = u.Scheme
+	}
+
+	getters := c.Getters
+	if getters == nil {
+		getters = Getters
+	}
+
+	g, ok := getters[force]
+	if !ok {
+		return fmt.Errorf(
+			"download not supported for scheme '%s'", force)
+	}
+
+	// We have magic query parameters that we use to signal different features
+	q := u.Query()
+
+	// Determine if we have an archive type
+	archiveV := q.Get("archive")
+	if archiveV != "" {
+		// Delete the parameter since it is a magic parameter we don't
+		// want to pass on to the Getter
+		q.Del("archive")
+		u.RawQuery = q.Encode()
+
+		// If we can parse the value as a bool and it is false, then
+		// set the archive to "-" which should never map to a decompressor
+		if b, err := strconv.ParseBool(archiveV); err == nil && !b {
+			archiveV = "-"
+		}
+	}
+	if archiveV == "" {
+		// No explicit archive type... but is it part of the filename?
+		matchingLen := 0
+		for k := range decompressors {
+			if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen {
+				archiveV = k
+				matchingLen = len(k)
+			}
+		}
+	}
+
+	// If we have a decompressor, then we need to change the destination
+	// to download to a temporary path. We unarchive this into the final,
+	// real path.
+	var decompressDst string
+	var decompressDir bool
+	decompressor := decompressors[archiveV]
+	if decompressor != nil {
+		// Create a temporary directory to store our archive. We delete
+		// this at the end of everything.
+		td, err := ioutil.TempDir("", "getter")
+		if err != nil {
+			return fmt.Errorf(
+				"Error creating temporary directory for archive: %s", err)
+		}
+		defer os.RemoveAll(td)
+
+		// Swap the download directory to be our temporary path and
+		// store the old values.
+		decompressDst = dst
+		decompressDir = mode != ClientModeFile
+		dst = filepath.Join(td, "archive")
+		mode = ClientModeFile
+	}
+
+	// Determine if we have a checksum
+	var checksumHash hash.Hash
+	var checksumValue []byte
+	if v := q.Get("checksum"); v != "" {
+		// Delete the query parameter if we have it.
+		q.Del("checksum")
+		u.RawQuery = q.Encode()
+
+		// Determine the checksum hash type
+		checksumType := ""
+		idx := strings.Index(v, ":")
+		if idx > -1 {
+			checksumType = v[:idx]
+		}
+		switch checksumType {
+		case "md5":
+			checksumHash = md5.New()
+		case "sha1":
+			checksumHash = sha1.New()
+		case "sha256":
+			checksumHash = sha256.New()
+		case "sha512":
+			checksumHash = sha512.New()
+		default:
+			return fmt.Errorf(
+				"unsupported checksum type: %s", checksumType)
+		}
+
+		// Get the remainder of the value and parse it into bytes
+		b, err := hex.DecodeString(v[idx+1:])
+		if err != nil {
+			return fmt.Errorf("invalid checksum: %s", err)
+		}
+
+		// Set our value
+		checksumValue = b
+	}
+
+	if mode == ClientModeAny {
+		// Ask the getter which client mode to use
+		mode, err = g.ClientMode(u)
+		if err != nil {
+			return err
+		}
+
+		// Destination is the base name of the URL path in "any" mode when
+		// a file source is detected.
+		if mode == ClientModeFile {
+			dst = filepath.Join(dst, filepath.Base(u.Path))
+		}
+	}
+
+	// If we're not downloading a directory, then just download the file
+	// and return.
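+	// Note the ordering below: the file is fetched, its checksum (if any)
+	// is verified, and only then is the archive decompressed into the
+	// final destination.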
+	if mode == ClientModeFile {
+		err := g.GetFile(dst, u)
+		if err != nil {
+			return err
+		}
+
+		if checksumHash != nil {
+			if err := checksum(dst, checksumHash, checksumValue); err != nil {
+				return err
+			}
+		}
+
+		if decompressor != nil {
+			// We have a decompressor, so decompress the current destination
+			// into the final destination with the proper mode.
+			err := decompressor.Decompress(decompressDst, dst, decompressDir)
+			if err != nil {
+				return err
+			}
+
+			// Swap the information back
+			dst = decompressDst
+			if decompressDir {
+				mode = ClientModeAny
+			} else {
+				mode = ClientModeFile
+			}
+		}
+
+		// We check the mode again because it can be switched back
+		// if we were unarchiving. If we're still only getting a file, then
+		// we're done.
+		if mode == ClientModeFile {
+			return nil
+		}
+	}
+
+	// If we're at this point we're either downloading a directory or we've
+	// downloaded and unarchived a directory and we're just checking subdir.
+	// In the case where we have a decompressor, we don't call Get because
+	// the archive was already fetched above.
+	if decompressor == nil {
+		// If we're getting a directory, then this is an error. You cannot
+		// checksum a directory. TODO: test
+		if checksumHash != nil {
+			return fmt.Errorf(
+				"checksum cannot be specified for directory download")
+		}
+
+		// We're downloading a directory, which might require a bit more work
+		// if we're specifying a subdir.
+		err := g.Get(dst, u)
+		if err != nil {
+			err = fmt.Errorf("error downloading '%s': %s", src, err)
+			return err
+		}
+	}
+
+	// If we have a subdir, copy that over
+	if subDir != "" {
+		if err := os.RemoveAll(realDst); err != nil {
+			return err
+		}
+		if err := os.MkdirAll(realDst, 0755); err != nil {
+			return err
+		}
+
+		return copyDir(realDst, filepath.Join(dst, subDir), false)
+	}
+
+	return nil
+}
+
+// checksum is a simple helper to compute the checksum of a source file
+// and compare it to the given expected value.
+func checksum(source string, h hash.Hash, v []byte) error {
+	f, err := os.Open(source)
+	if err != nil {
+		return fmt.Errorf("Failed to open file for checksum: %s", err)
+	}
+	defer f.Close()
+
+	if _, err := io.Copy(h, f); err != nil {
+		return fmt.Errorf("Failed to hash: %s", err)
+	}
+
+	if actual := h.Sum(nil); !bytes.Equal(actual, v) {
+		return fmt.Errorf(
+			"Checksums did not match.\nExpected: %s\nGot: %s",
+			hex.EncodeToString(v),
+			hex.EncodeToString(actual))
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/client_mode.go b/vendor/github.com/hashicorp/go-getter/client_mode.go
new file mode 100644
index 00000000..7f02509a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/client_mode.go
@@ -0,0 +1,24 @@
+package getter
+
+// ClientMode is the mode that the client operates in.
+type ClientMode uint
+
+const (
+	ClientModeInvalid ClientMode = iota
+
+	// ClientModeAny downloads anything it can. In this mode, dst must
+	// be a directory. If src is a file, it is saved into the directory
+	// with the basename of the URL. If src is a directory or archive,
+	// it is unpacked directly into dst.
+	ClientModeAny
+
+	// ClientModeFile downloads a single file. In this mode, dst must
+	// be a file path (doesn't have to exist). src must point to a single
+	// file. It is saved as dst.
+	ClientModeFile
+
+	// ClientModeDir downloads a directory. In this mode, dst must be
+	// a directory path (doesn't have to exist). src must point to an
+	// archive or directory (such as in s3).
+ ClientModeDir +) diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/copy_dir.go new file mode 100644 index 00000000..2f58e8ae --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/copy_dir.go @@ -0,0 +1,78 @@ +package getter + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// copyDir copies the src directory contents into dst. Both directories +// should already exist. +// +// If ignoreDot is set to true, then dot-prefixed files/folders are ignored. +func copyDir(dst string, src string, ignoreDot bool) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == src { + return nil + } + + if ignoreDot && strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // If we have a directory, make that subdirectory, then continue + // the walk. + if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. + return nil + } + + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + + return nil + } + + // If we have a file, copy the contents. + srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dstPath) + if err != nil { + return err + } + defer dstF.Close() + + if _, err := io.Copy(dstF, srcF); err != nil { + return err + } + + // Chmod it + return os.Chmod(dstPath, info.Mode()) + } + + return filepath.Walk(src, walkFn) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go new file mode 100644 index 00000000..d18174cc --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress.go @@ -0,0 +1,29 @@ +package getter + +// Decompressor defines the interface that must be implemented to add +// support for decompressing a type. +type Decompressor interface { + // Decompress should decompress src to dst. dir specifies whether dst + // is a directory or single file. src is guaranteed to be a single file + // that exists. dst is not guaranteed to exist already. + Decompress(dst, src string, dir bool) error +} + +// Decompressors is the mapping of extension to the Decompressor implementation +// that will decompress that extension/type. +var Decompressors map[string]Decompressor + +func init() { + tbzDecompressor := new(TarBzip2Decompressor) + tgzDecompressor := new(TarGzipDecompressor) + + Decompressors = map[string]Decompressor{ + "bz2": new(Bzip2Decompressor), + "gz": new(GzipDecompressor), + "tar.bz2": tbzDecompressor, + "tar.gz": tgzDecompressor, + "tbz2": tbzDecompressor, + "tgz": tgzDecompressor, + "zip": new(ZipDecompressor), + } +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go new file mode 100644 index 00000000..339f4cf7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go @@ -0,0 +1,45 @@ +package getter + +import ( + "compress/bzip2" + "fmt" + "io" + "os" + "path/filepath" +) + +// Bzip2Decompressor is an implementation of Decompressor that can +// decompress bz2 files. 
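+// It is registered in the Decompressors map under the "bz2" extension.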
+type Bzip2Decompressor struct{}
+
+func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool) error {
+	// Directory isn't supported at all
+	if dir {
+		return fmt.Errorf("bzip2-compressed files can only unarchive to a single file")
+	}
+
+	// Make sure the enclosing directory for the destination exists
+	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+		return err
+	}
+
+	// File first
+	f, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// Bzip2 compression is second
+	bzipR := bzip2.NewReader(f)
+
+	// Copy it out
+	dstF, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer dstF.Close()
+
+	_, err = io.Copy(dstF, bzipR)
+	return err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
new file mode 100644
index 00000000..20010540
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
@@ -0,0 +1,49 @@
+package getter
+
+import (
+	"compress/gzip"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// GzipDecompressor is an implementation of Decompressor that can
+// decompress gzip files.
+type GzipDecompressor struct{}
+
+func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error {
+	// Directory isn't supported at all
+	if dir {
+		return fmt.Errorf("gzip-compressed files can only unarchive to a single file")
+	}
+
+	// Make sure the enclosing directory for the destination exists
+	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+		return err
+	}
+
+	// File first
+	f, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// Gzip compression is second
+	gzipR, err := gzip.NewReader(f)
+	if err != nil {
+		return err
+	}
+	defer gzipR.Close()
+
+	// Copy it out
+	dstF, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer dstF.Close()
+
+	_, err = io.Copy(dstF, gzipR)
+	return err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
new file mode 100644
index 00000000..5391b5c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
@@ -0,0 +1,33 @@
+package getter
+
+import (
+	"compress/bzip2"
+	"os"
+	"path/filepath"
+)
+
+// TarBzip2Decompressor is an implementation of Decompressor that can
+// decompress tar.bz2 files.
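+// It is registered under both the "tar.bz2" and "tbz2" extensions.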
+type TarBzip2Decompressor struct{} + +func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // Bzip2 compression is second + bzipR := bzip2.NewReader(f) + return untar(bzipR, dst, src, dir) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go new file mode 100644 index 00000000..82b8ab4f --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go @@ -0,0 +1,135 @@ +package getter + +import ( + "crypto/md5" + "encoding/hex" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "sort" + "strings" + + "github.com/mitchellh/go-testing-interface" +) + +// TestDecompressCase is a single test case for testing decompressors +type TestDecompressCase struct { + Input string // Input is the complete path to the input file + Dir bool // Dir is whether or not we're testing directory mode + Err bool // Err is whether we expect an error or not + DirList []string // DirList is the list of files for Dir mode + FileMD5 string // FileMD5 is the expected MD5 for a single file +} + +// TestDecompressor is a helper function for testing generic decompressors. +func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) { + for _, tc := range cases { + t.Logf("Testing: %s", tc.Input) + + // Temporary dir to store stuff + td, err := ioutil.TempDir("", "getter") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Destination is always joining result so that we have a new path + dst := filepath.Join(td, "subdir", "result") + + // We use a function so defers work + func() { + defer os.RemoveAll(td) + + // Decompress + err := d.Decompress(dst, tc.Input, tc.Dir) + if (err != nil) != tc.Err { + t.Fatalf("err %s: %s", tc.Input, err) + } + if tc.Err { + return + } + + // If it isn't a directory, then check for a single file + if !tc.Dir { + fi, err := os.Stat(dst) + if err != nil { + t.Fatalf("err %s: %s", tc.Input, err) + } + if fi.IsDir() { + t.Fatalf("err %s: expected file, got directory", tc.Input) + } + if tc.FileMD5 != "" { + actual := testMD5(t, dst) + expected := tc.FileMD5 + if actual != expected { + t.Fatalf("err %s: expected MD5 %s, got %s", tc.Input, expected, actual) + } + } + + return + } + + // Convert expected for windows + expected := tc.DirList + if runtime.GOOS == "windows" { + for i, v := range expected { + expected[i] = strings.Replace(v, "/", "\\", -1) + } + } + + // Directory, check for the correct contents + actual := testListDir(t, dst) + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected) + } + }() + } +} + +func testListDir(t testing.T, path string) []string { + var result []string + err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + sub = strings.TrimPrefix(sub, path) + if sub == "" { + return nil + } + sub = sub[1:] // Trim the leading path sep. 
+
+		// If it is a dir, add trailing sep
+		if info.IsDir() {
+			sub += string(os.PathSeparator)
+		}
+
+		result = append(result, sub)
+		return nil
+	})
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	sort.Strings(result)
+	return result
+}
+
+func testMD5(t testing.T, path string) string {
+	f, err := os.Open(path)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	defer f.Close()
+
+	h := md5.New()
+	_, err = io.Copy(h, f)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	result := h.Sum(nil)
+	return hex.EncodeToString(result)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
new file mode 100644
index 00000000..65eb70dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
@@ -0,0 +1,39 @@
+package getter
+
+import (
+	"compress/gzip"
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+// TarGzipDecompressor is an implementation of Decompressor that can
+// decompress tar.gz files.
+type TarGzipDecompressor struct{}
+
+func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error {
+	// If we're going into a directory we should make that first
+	mkdir := dst
+	if !dir {
+		mkdir = filepath.Dir(dst)
+	}
+	if err := os.MkdirAll(mkdir, 0755); err != nil {
+		return err
+	}
+
+	// File first
+	f, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// Gzip compression is second
+	gzipR, err := gzip.NewReader(f)
+	if err != nil {
+		return fmt.Errorf("Error opening a gzip reader for %s: %s", src, err)
+	}
+	defer gzipR.Close()
+
+	return untar(gzipR, dst, src, dir)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
new file mode 100644
index 00000000..a065c076
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
@@ -0,0 +1,96 @@
+package getter
+
+import (
+	"archive/zip"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// ZipDecompressor is an implementation of Decompressor that can
+// decompress zip files.
+type ZipDecompressor struct{}
+
+func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error {
+	// If we're going into a directory we should make that first
+	mkdir := dst
+	if !dir {
+		mkdir = filepath.Dir(dst)
+	}
+	if err := os.MkdirAll(mkdir, 0755); err != nil {
+		return err
+	}
+
+	// Open the zip
+	zipR, err := zip.OpenReader(src)
+	if err != nil {
+		return err
+	}
+	defer zipR.Close()
+
+	// Check the zip integrity
+	if len(zipR.File) == 0 {
+		// Empty archive
+		return fmt.Errorf("empty archive: %s", src)
+	}
+	if !dir && len(zipR.File) > 1 {
+		return fmt.Errorf("expected a single file: %s", src)
+	}
+
+	// Go through and unarchive
+	for _, f := range zipR.File {
+		path := dst
+		if dir {
+			path = filepath.Join(path, f.Name)
+		}
+
+		if f.FileInfo().IsDir() {
+			if !dir {
+				return fmt.Errorf("expected a single file: %s", src)
+			}
+
+			// A directory, just make the directory and continue unarchiving...
+			if err := os.MkdirAll(path, 0755); err != nil {
+				return err
+			}
+
+			continue
+		}
+
+		// Create the enclosing directories if we must. ZIP files aren't
+		// required to contain entries for just the directories so this
+		// can happen.
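+		// (For example, an archive may contain "a/b/c.txt" without any
+		// explicit entries for the "a/" or "a/b/" directories.)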
+		if dir {
+			if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+				return err
+			}
+		}
+
+		// Open the file for reading
+		srcF, err := f.Open()
+		if err != nil {
+			return err
+		}
+
+		// Open the file for writing
+		dstF, err := os.Create(path)
+		if err != nil {
+			srcF.Close()
+			return err
+		}
+		_, err = io.Copy(dstF, srcF)
+		srcF.Close()
+		dstF.Close()
+		if err != nil {
+			return err
+		}
+
+		// Chmod the file
+		if err := os.Chmod(path, f.Mode()); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go
new file mode 100644
index 00000000..481b737c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect.go
@@ -0,0 +1,97 @@
+package getter
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/hashicorp/go-getter/helper/url"
+)
+
+// Detector defines the interface that an invalid URL or a URL with a blank
+// scheme is passed through in order to determine if it is shorthand for
+// something else well-known.
+type Detector interface {
+	// Detect will detect whether the string matches a known pattern to
+	// turn it into a proper URL.
+	Detect(string, string) (string, bool, error)
+}
+
+// Detectors is the list of detectors that are tried on an invalid URL.
+// This is also the order they're tried (index 0 is first).
+var Detectors []Detector
+
+func init() {
+	Detectors = []Detector{
+		new(GitHubDetector),
+		new(BitBucketDetector),
+		new(S3Detector),
+		new(FileDetector),
+	}
+}
+
+// Detect turns a source string into another source string if it is
+// detected to be of a known pattern.
+//
+// The third parameter should be the list of detectors to use in the
+// order they should be tried. If you don't want to configure this, just use
+// the global Detectors variable.
+//
+// This is safe to be called with an already valid source string: Detect
+// will just return it.
+func Detect(src string, pwd string, ds []Detector) (string, error) {
+	getForce, getSrc := getForcedGetter(src)
+
+	// Separate out the subdir if there is one, we don't pass that to detect
+	getSrc, subDir := SourceDirSubdir(getSrc)
+
+	u, err := url.Parse(getSrc)
+	if err == nil && u.Scheme != "" {
+		// Valid URL
+		return src, nil
+	}
+
+	for _, d := range ds {
+		result, ok, err := d.Detect(getSrc, pwd)
+		if err != nil {
+			return "", err
+		}
+		if !ok {
+			continue
+		}
+
+		var detectForce string
+		detectForce, result = getForcedGetter(result)
+		result, detectSubdir := SourceDirSubdir(result)
+
+		// If we have a subdir from the detection, then prepend it to our
+		// requested subdir.
+		if detectSubdir != "" {
+			if subDir != "" {
+				subDir = filepath.Join(detectSubdir, subDir)
+			} else {
+				subDir = detectSubdir
+			}
+		}
+		if subDir != "" {
+			u, err := url.Parse(result)
+			if err != nil {
+				return "", fmt.Errorf("Error parsing URL: %s", err)
+			}
+			u.Path += "//" + subDir
+			result = u.String()
+		}
+
+		// Preserve the forced getter if it exists. We try to use the
+		// originally set force first, followed by any force set by the
+		// detector.
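+		// For example, "git::github.com/foo/bar" keeps its explicit "git"
+		// force even though the GitHub detector would also report one
+		// ("foo/bar" is an illustrative repository).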
+		if getForce != "" {
+			result = fmt.Sprintf("%s::%s", getForce, result)
+		} else if detectForce != "" {
+			result = fmt.Sprintf("%s::%s", detectForce, result)
+		}
+
+		return result, nil
+	}
+
+	return "", fmt.Errorf("invalid source string: %s", src)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
new file mode 100644
index 00000000..a183a17d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
@@ -0,0 +1,66 @@
+package getter
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+// BitBucketDetector implements Detector to detect BitBucket URLs and turn
+// them into URLs that the Git or Hg Getter can understand.
+type BitBucketDetector struct{}
+
+func (d *BitBucketDetector) Detect(src, _ string) (string, bool, error) {
+	if len(src) == 0 {
+		return "", false, nil
+	}
+
+	if strings.HasPrefix(src, "bitbucket.org/") {
+		return d.detectHTTP(src)
+	}
+
+	return "", false, nil
+}
+
+func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) {
+	u, err := url.Parse("https://" + src)
+	if err != nil {
+		return "", true, fmt.Errorf("error parsing BitBucket URL: %s", err)
+	}
+
+	// We need to get info on this BitBucket repository to determine whether
+	// it is Git or Hg.
+	var info struct {
+		SCM string `json:"scm"`
+	}
+	infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path
+	resp, err := http.Get(infoUrl)
+	if err != nil {
+		return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
+	}
+	// Make sure the response body is always closed.
+	defer resp.Body.Close()
+	if resp.StatusCode == 403 {
+		// A private repo
+		return "", true, fmt.Errorf(
+			"shorthand BitBucket URL can't be used for private repos, " +
+				"please use a full URL")
+	}
+	dec := json.NewDecoder(resp.Body)
+	if err := dec.Decode(&info); err != nil {
+		return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
+	}
+
+	switch info.SCM {
+	case "git":
+		if !strings.HasSuffix(u.Path, ".git") {
+			u.Path += ".git"
+		}
+
+		return "git::" + u.String(), true, nil
+	case "hg":
+		return "hg::" + u.String(), true, nil
+	default:
+		return "", true, fmt.Errorf("unknown BitBucket SCM type: %s", info.SCM)
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go
new file mode 100644
index 00000000..756ea43f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_file.go
@@ -0,0 +1,67 @@
+package getter
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+// FileDetector implements Detector to detect file paths.
+type FileDetector struct{}
+
+func (d *FileDetector) Detect(src, pwd string) (string, bool, error) {
+	if len(src) == 0 {
+		return "", false, nil
+	}
+
+	if !filepath.IsAbs(src) {
+		if pwd == "" {
+			return "", true, fmt.Errorf(
+				"relative paths require a module with a pwd")
+		}
+
+		// Stat the pwd to determine if it's a symbolic link. If it is,
+		// then the pwd becomes the original directory. Otherwise,
+		// `filepath.Join` below can produce an incorrect path.
+		//
+		// We just ignore if the pwd doesn't exist. That error will be
+		// caught later when we try to use the URL.
+		if fi, err := os.Lstat(pwd); !os.IsNotExist(err) {
+			if err != nil {
+				return "", true, err
+			}
+			if fi.Mode()&os.ModeSymlink != 0 {
+				pwd, err = os.Readlink(pwd)
+				if err != nil {
+					return "", true, err
+				}
+
+				// The symlink itself might be a relative path, so we have to
+				// resolve this to have a correctly rooted URL.
+ pwd, err = filepath.Abs(pwd) + if err != nil { + return "", true, err + } + } + } + + src = filepath.Join(pwd, src) + } + + return fmtFileURL(src), true, nil +} + +func fmtFileURL(path string) string { + if runtime.GOOS == "windows" { + // Make sure we're using "/" on Windows. URLs are "/"-based. + path = filepath.ToSlash(path) + return fmt.Sprintf("file://%s", path) + } + + // Make sure that we don't start with "/" since we add that below. + if path[0] == '/' { + path = path[1:] + } + return fmt.Sprintf("file:///%s", path) +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_github.go b/vendor/github.com/hashicorp/go-getter/detect_github.go new file mode 100644 index 00000000..c084ad9a --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_github.go @@ -0,0 +1,73 @@ +package getter + +import ( + "fmt" + "net/url" + "strings" +) + +// GitHubDetector implements Detector to detect GitHub URLs and turn +// them into URLs that the Git Getter can understand. +type GitHubDetector struct{} + +func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.HasPrefix(src, "github.com/") { + return d.detectHTTP(src) + } else if strings.HasPrefix(src, "git@github.com:") { + return d.detectSSH(src) + } + + return "", false, nil +} + +func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) { + parts := strings.Split(src, "/") + if len(parts) < 3 { + return "", false, fmt.Errorf( + "GitHub URLs should be github.com/username/repo") + } + + urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/")) + url, err := url.Parse(urlStr) + if err != nil { + return "", true, fmt.Errorf("error parsing GitHub URL: %s", err) + } + + if !strings.HasSuffix(url.Path, ".git") { + url.Path += ".git" + } + + if len(parts) > 3 { + url.Path += "//" + strings.Join(parts[3:], "/") + } + + return "git::" + url.String(), true, nil +} + +func (d *GitHubDetector) detectSSH(src string) (string, bool, error) { + idx := strings.Index(src, ":") + qidx := strings.Index(src, "?") + if qidx == -1 { + qidx = len(src) + } + + var u url.URL + u.Scheme = "ssh" + u.User = url.User("git") + u.Host = "github.com" + u.Path = src[idx+1 : qidx] + if qidx < len(src) { + q, err := url.ParseQuery(src[qidx+1:]) + if err != nil { + return "", true, fmt.Errorf("error parsing GitHub SSH URL: %s", err) + } + + u.RawQuery = q.Encode() + } + + return "git::" + u.String(), true, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_s3.go b/vendor/github.com/hashicorp/go-getter/detect_s3.go new file mode 100644 index 00000000..8e0f4a03 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_s3.go @@ -0,0 +1,61 @@ +package getter + +import ( + "fmt" + "net/url" + "strings" +) + +// S3Detector implements Detector to detect S3 URLs and turn +// them into URLs that the S3 getter can understand. 
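+// For example, "bucket.s3.amazonaws.com/foo" is turned into
+// "s3::https://s3.amazonaws.com/bucket/foo".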
+type S3Detector struct{}
+
+func (d *S3Detector) Detect(src, _ string) (string, bool, error) {
+	if len(src) == 0 {
+		return "", false, nil
+	}
+
+	if strings.Contains(src, ".amazonaws.com/") {
+		return d.detectHTTP(src)
+	}
+
+	return "", false, nil
+}
+
+func (d *S3Detector) detectHTTP(src string) (string, bool, error) {
+	parts := strings.Split(src, "/")
+	if len(parts) < 2 {
+		return "", false, fmt.Errorf(
+			"URL is not a valid S3 URL")
+	}
+
+	hostParts := strings.Split(parts[0], ".")
+	if len(hostParts) == 3 {
+		return d.detectPathStyle(hostParts[0], parts[1:])
+	} else if len(hostParts) == 4 {
+		return d.detectVhostStyle(hostParts[1], hostParts[0], parts[1:])
+	} else {
+		return "", false, fmt.Errorf(
+			"URL is not a valid S3 URL")
+	}
+}
+
+func (d *S3Detector) detectPathStyle(region string, parts []string) (string, bool, error) {
+	urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s", region, strings.Join(parts, "/"))
+	url, err := url.Parse(urlStr)
+	if err != nil {
+		return "", false, fmt.Errorf("error parsing S3 URL: %s", err)
+	}
+
+	return "s3::" + url.String(), true, nil
+}
+
+func (d *S3Detector) detectVhostStyle(region, bucket string, parts []string) (string, bool, error) {
+	urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s/%s", region, bucket, strings.Join(parts, "/"))
+	url, err := url.Parse(urlStr)
+	if err != nil {
+		return "", false, fmt.Errorf("error parsing S3 URL: %s", err)
+	}
+
+	return "s3::" + url.String(), true, nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/folder_storage.go b/vendor/github.com/hashicorp/go-getter/folder_storage.go
new file mode 100644
index 00000000..647ccf45
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/folder_storage.go
@@ -0,0 +1,65 @@
+package getter
+
+import (
+	"crypto/md5"
+	"encoding/hex"
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+// FolderStorage is an implementation of the Storage interface that manages
+// modules on the disk.
+type FolderStorage struct {
+	// StorageDir is the directory where the modules will be stored.
+	StorageDir string
+}
+
+// Dir implements Storage.Dir
+func (s *FolderStorage) Dir(key string) (d string, e bool, err error) {
+	d = s.dir(key)
+	_, err = os.Stat(d)
+	if err == nil {
+		// Directory exists
+		e = true
+		return
+	}
+	if os.IsNotExist(err) {
+		// Directory doesn't exist
+		d = ""
+		e = false
+		err = nil
+		return
+	}
+
+	// An error
+	d = ""
+	e = false
+	return
+}
+
+// Get implements Storage.Get
+func (s *FolderStorage) Get(key string, source string, update bool) error {
+	dir := s.dir(key)
+	if !update {
+		if _, err := os.Stat(dir); err == nil {
+			// If the directory already exists, then we're done since
+			// we're not updating.
+			return nil
+		} else if !os.IsNotExist(err) {
+			// If the error we got wasn't a file-not-exist error, then
+			// something went wrong and we should report it.
+			return fmt.Errorf("Error reading module directory: %s", err)
+		}
+	}
+
+	// Get the source. This always forces an update.
+	return Get(dir, source)
+}
+
+// dir returns the directory name that we use internally to map the
+// given key to a location on disk.
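+// Hashing the key (MD5, hex-encoded) keeps arbitrary source strings safe
+// to use as directory names.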
+func (s *FolderStorage) dir(key string) string {
+	sum := md5.Sum([]byte(key))
+	return filepath.Join(s.StorageDir, hex.EncodeToString(sum[:]))
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go
new file mode 100644
index 00000000..c3236f55
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get.go
@@ -0,0 +1,139 @@
+// Package getter is a package for downloading files or directories from a
+// variety of protocols.
+//
+// getter is unique in its ability to download both directories and files.
+// It also detects certain source strings to be protocol-specific URLs. For
+// example, "github.com/hashicorp/go-getter" would turn into a Git URL and
+// use the Git protocol.
+//
+// Protocols and detectors are extensible.
+//
+// To get started, see Client.
+package getter
+
+import (
+	"bytes"
+	"fmt"
+	"net/url"
+	"os/exec"
+	"regexp"
+	"syscall"
+)
+
+// Getter defines the interface that schemes must implement to download
+// things.
+type Getter interface {
+	// Get downloads the given URL into the given directory. This always
+	// assumes that we're updating and gets the latest version that it can.
+	//
+	// The directory may already exist (if we're updating). If it is in a
+	// format that isn't understood, an error should be returned. Get shouldn't
+	// simply nuke the directory.
+	Get(string, *url.URL) error
+
+	// GetFile downloads the given URL into the given path. The URL must
+	// reference a single file. If possible, the Getter should check if
+	// the remote end contains the same file and no-op this operation.
+	GetFile(string, *url.URL) error
+
+	// ClientMode returns the mode based on the given URL. This is used to
+	// allow clients to let the getters decide which mode to use.
+	ClientMode(*url.URL) (ClientMode, error)
+}
+
+// Getters is the mapping of scheme to the Getter implementation that will
+// be used to get a dependency.
+var Getters map[string]Getter
+
+// forcedRegexp is the regular expression that finds forced getters. This
+// syntax is scheme::url, example: git::https://foo.com
+var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`)
+
+func init() {
+	httpGetter := &HttpGetter{Netrc: true}
+
+	Getters = map[string]Getter{
+		"file":  new(FileGetter),
+		"git":   new(GitGetter),
+		"hg":    new(HgGetter),
+		"s3":    new(S3Getter),
+		"http":  httpGetter,
+		"https": httpGetter,
+	}
+}
+
+// Get downloads the directory specified by src into the folder specified by
+// dst. If dst already exists, Get will attempt to update it.
+//
+// src is a URL, whereas dst is always just a file path to a folder. This
+// folder doesn't need to exist. It will be created if it doesn't exist.
+func Get(dst, src string) error {
+	return (&Client{
+		Src:     src,
+		Dst:     dst,
+		Dir:     true,
+		Getters: Getters,
+	}).Get()
+}
+
+// GetAny downloads a URL into the given destination. Unlike Get or
+// GetFile, both directories and files are supported.
+//
+// dst must be a directory. If src is a file, it will be downloaded
+// into dst with the basename of the URL. If src is a directory or
+// archive, it will be unpacked directly into dst.
+func GetAny(dst, src string) error {
+	return (&Client{
+		Src:     src,
+		Dst:     dst,
+		Mode:    ClientModeAny,
+		Getters: Getters,
+	}).Get()
+}
+
+// GetFile downloads the file specified by src into the path specified by
+// dst.
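+//
+// For example (paths illustrative):
+//
+//	err := GetFile("/tmp/foo.txt", "./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21")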
+func GetFile(dst, src string) error { + return (&Client{ + Src: src, + Dst: dst, + Dir: false, + Getters: Getters, + }).Get() +} + +// getRunCommand is a helper that will run a command and capture the output +// in the case an error happens. +func getRunCommand(cmd *exec.Cmd) error { + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + err := cmd.Run() + if err == nil { + return nil + } + if exiterr, ok := err.(*exec.ExitError); ok { + // The program has exited with an exit code != 0 + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return fmt.Errorf( + "%s exited with %d: %s", + cmd.Path, + status.ExitStatus(), + buf.String()) + } + } + + return fmt.Errorf("error running %s: %s", cmd.Path, buf.String()) +} + +// getForcedGetter takes a source and returns the tuple of the forced +// getter and the raw URL (without the force syntax). +func getForcedGetter(src string) (string, string) { + var forced string + if ms := forcedRegexp.FindStringSubmatch(src); ms != nil { + forced = ms[1] + src = ms[2] + } + + return forced, src +} diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go new file mode 100644 index 00000000..e5d2d61d --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_file.go @@ -0,0 +1,32 @@ +package getter + +import ( + "net/url" + "os" +) + +// FileGetter is a Getter implementation that will download a module from +// a file scheme. +type FileGetter struct { + // Copy, if set to true, will copy data instead of using a symlink + Copy bool +} + +func (g *FileGetter) ClientMode(u *url.URL) (ClientMode, error) { + path := u.Path + if u.RawPath != "" { + path = u.RawPath + } + + fi, err := os.Stat(path) + if err != nil { + return 0, err + } + + // Check if the source is a directory. + if fi.IsDir() { + return ClientModeDir, nil + } + + return ClientModeFile, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go new file mode 100644 index 00000000..c89a2d5a --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_file_unix.go @@ -0,0 +1,103 @@ +// +build !windows + +package getter + +import ( + "fmt" + "io" + "net/url" + "os" + "path/filepath" +) + +func (g *FileGetter) Get(dst string, u *url.URL) error { + path := u.Path + if u.RawPath != "" { + path = u.RawPath + } + + // The source path must exist and be a directory to be usable. + if fi, err := os.Stat(path); err != nil { + return fmt.Errorf("source path error: %s", err) + } else if !fi.IsDir() { + return fmt.Errorf("source path must be a directory") + } + + fi, err := os.Lstat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + + // If the destination already exists, it must be a symlink + if err == nil { + mode := fi.Mode() + if mode&os.ModeSymlink == 0 { + return fmt.Errorf("destination exists and is not a symlink") + } + + // Remove the destination + if err := os.Remove(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + return os.Symlink(path, dst) +} + +func (g *FileGetter) GetFile(dst string, u *url.URL) error { + path := u.Path + if u.RawPath != "" { + path = u.RawPath + } + + // The source path must exist and be a file to be usable. 
+	if fi, err := os.Stat(path); err != nil {
+		return fmt.Errorf("source path error: %s", err)
+	} else if fi.IsDir() {
+		return fmt.Errorf("source path must be a file")
+	}
+
+	_, err := os.Lstat(dst)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	// If the destination already exists, it must be a symlink
+	if err == nil {
+		// Remove the destination
+		if err := os.Remove(dst); err != nil {
+			return err
+		}
+	}
+
+	// Create all the parent directories
+	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+		return err
+	}
+
+	// If we're not copying, just symlink and we're done
+	if !g.Copy {
+		return os.Symlink(path, dst)
+	}
+
+	// Copy
+	srcF, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	defer srcF.Close()
+
+	dstF, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer dstF.Close()
+
+	_, err = io.Copy(dstF, srcF)
+	return err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
new file mode 100644
index 00000000..f87ed0a0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
@@ -0,0 +1,120 @@
+// +build windows
+
+package getter
+
+import (
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+)
+
+func (g *FileGetter) Get(dst string, u *url.URL) error {
+	path := u.Path
+	if u.RawPath != "" {
+		path = u.RawPath
+	}
+
+	// The source path must exist and be a directory to be usable.
+	if fi, err := os.Stat(path); err != nil {
+		return fmt.Errorf("source path error: %s", err)
+	} else if !fi.IsDir() {
+		return fmt.Errorf("source path must be a directory")
+	}
+
+	fi, err := os.Lstat(dst)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	// If the destination already exists, it must be a symlink
+	if err == nil {
+		mode := fi.Mode()
+		if mode&os.ModeSymlink == 0 {
+			return fmt.Errorf("destination exists and is not a symlink")
+		}
+
+		// Remove the destination
+		if err := os.Remove(dst); err != nil {
+			return err
+		}
+	}
+
+	// Create all the parent directories
+	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+		return err
+	}
+
+	sourcePath := toBackslash(path)
+
+	// Use mklink to create a junction point
+	output, err := exec.Command("cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output)
+	}
+
+	return nil
+}
+
+func (g *FileGetter) GetFile(dst string, u *url.URL) error {
+	path := u.Path
+	if u.RawPath != "" {
+		path = u.RawPath
+	}
+
+	// The source path must exist and be a file to be usable.
+ if fi, err := os.Stat(path); err != nil { + return fmt.Errorf("source path error: %s", err) + } else if fi.IsDir() { + return fmt.Errorf("source path must be a file") + } + + _, err := os.Lstat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + + // If the destination already exists, it must be a symlink + if err == nil { + // Remove the destination + if err := os.Remove(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // If we're not copying, just symlink and we're done + if !g.Copy { + return os.Symlink(path, dst) + } + + // Copy + srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, srcF) + return err +} + +// toBackslash returns the result of replacing each slash character +// in path with a backslash ('\') character. Multiple separators are +// replaced by multiple backslashes. +func toBackslash(path string) string { + return strings.Replace(path, "/", "\\", -1) +} diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go new file mode 100644 index 00000000..07281398 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_git.go @@ -0,0 +1,225 @@ +package getter + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + urlhelper "github.com/hashicorp/go-getter/helper/url" + "github.com/hashicorp/go-version" +) + +// GitGetter is a Getter implementation that will download a module from +// a git repository. +type GitGetter struct{} + +func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) { + return ClientModeDir, nil +} + +func (g *GitGetter) Get(dst string, u *url.URL) error { + if _, err := exec.LookPath("git"); err != nil { + return fmt.Errorf("git must be available and on the PATH") + } + + // Extract some query parameters we use + var ref, sshKey string + q := u.Query() + if len(q) > 0 { + ref = q.Get("ref") + q.Del("ref") + + sshKey = q.Get("sshkey") + q.Del("sshkey") + + // Copy the URL + var newU url.URL = *u + u = &newU + u.RawQuery = q.Encode() + } + + var sshKeyFile string + if sshKey != "" { + // Check that the git version is sufficiently new. + if err := checkGitVersion("2.3"); err != nil { + return fmt.Errorf("Error using ssh key: %v", err) + } + + // We have an SSH key - decode it. + raw, err := base64.StdEncoding.DecodeString(sshKey) + if err != nil { + return err + } + + // Create a temp file for the key and ensure it is removed. + fh, err := ioutil.TempFile("", "go-getter") + if err != nil { + return err + } + sshKeyFile = fh.Name() + defer os.Remove(sshKeyFile) + + // Set the permissions prior to writing the key material. + if err := os.Chmod(sshKeyFile, 0600); err != nil { + return err + } + + // Write the raw key into the temp file. 
+ _, err = fh.Write(raw) + fh.Close() + if err != nil { + return err + } + } + + // Clone or update the repository + _, err := os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + err = g.update(dst, sshKeyFile, ref) + } else { + err = g.clone(dst, sshKeyFile, u) + } + if err != nil { + return err + } + + // Next: check out the proper tag/branch if it is specified, and checkout + if ref != "" { + if err := g.checkout(dst, ref); err != nil { + return err + } + } + + // Lastly, download any/all submodules. + return g.fetchSubmodules(dst, sshKeyFile) +} + +// GetFile for Git doesn't support updating at this time. It will download +// the file every time. +func (g *GitGetter) GetFile(dst string, u *url.URL) error { + td, err := ioutil.TempDir("", "getter-git") + if err != nil { + return err + } + if err := os.RemoveAll(td); err != nil { + return err + } + + // Get the filename, and strip the filename from the URL so we can + // just get the repository directly. + filename := filepath.Base(u.Path) + u.Path = filepath.Dir(u.Path) + + // Get the full repository + if err := g.Get(td, u); err != nil { + return err + } + + // Copy the single file + u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) + if err != nil { + return err + } + + fg := &FileGetter{Copy: true} + return fg.GetFile(dst, u) +} + +func (g *GitGetter) checkout(dst string, ref string) error { + cmd := exec.Command("git", "checkout", ref) + cmd.Dir = dst + return getRunCommand(cmd) +} + +func (g *GitGetter) clone(dst, sshKeyFile string, u *url.URL) error { + cmd := exec.Command("git", "clone", u.String(), dst) + setupGitEnv(cmd, sshKeyFile) + return getRunCommand(cmd) +} + +func (g *GitGetter) update(dst, sshKeyFile, ref string) error { + // Determine if we're a branch. If we're NOT a branch, then we just + // switch to master prior to checking out + cmd := exec.Command("git", "show-ref", "-q", "--verify", "refs/heads/"+ref) + cmd.Dir = dst + + if getRunCommand(cmd) != nil { + // Not a branch, switch to master. This will also catch non-existent + // branches, in which case we want to switch to master and then + // checkout the proper branch later. + ref = "master" + } + + // We have to be on a branch to pull + if err := g.checkout(dst, ref); err != nil { + return err + } + + cmd = exec.Command("git", "pull", "--ff-only") + cmd.Dir = dst + setupGitEnv(cmd, sshKeyFile) + return getRunCommand(cmd) +} + +// fetchSubmodules downloads any configured submodules recursively. +func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error { + cmd := exec.Command("git", "submodule", "update", "--init", "--recursive") + cmd.Dir = dst + setupGitEnv(cmd, sshKeyFile) + return getRunCommand(cmd) +} + +// setupGitEnv sets up the environment for the given command. This is used to +// pass configuration data to git and ssh and enables advanced cloning methods. +func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) { + var sshOpts []string + + if sshKeyFile != "" { + // We have an SSH key temp file configured, tell ssh about this. + sshOpts = append(sshOpts, "-i", sshKeyFile) + } + + cmd.Env = append(os.Environ(), + // Set the ssh command to use for clones. + "GIT_SSH_COMMAND=ssh "+strings.Join(sshOpts, " "), + ) +} + +// checkGitVersion is used to check the version of git installed on the system +// against a known minimum version. Returns an error if the installed version +// is older than the given minimum. 
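+// For example, checkGitVersion("2.3") returns an error if `git version`
+// reports 2.1.0.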
+func checkGitVersion(min string) error { + want, err := version.NewVersion(min) + if err != nil { + return err + } + + out, err := exec.Command("git", "version").Output() + if err != nil { + return err + } + + fields := strings.Fields(string(out)) + if len(fields) != 3 { + return fmt.Errorf("Unexpected 'git version' output: %q", string(out)) + } + + have, err := version.NewVersion(fields[2]) + if err != nil { + return err + } + + if have.LessThan(want) { + return fmt.Errorf("Required git version = %s, have %s", want, have) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go new file mode 100644 index 00000000..820bdd48 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_hg.go @@ -0,0 +1,131 @@ +package getter + +import ( + "fmt" + "io/ioutil" + "net/url" + "os" + "os/exec" + "path/filepath" + "runtime" + + urlhelper "github.com/hashicorp/go-getter/helper/url" +) + +// HgGetter is a Getter implementation that will download a module from +// a Mercurial repository. +type HgGetter struct{} + +func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) { + return ClientModeDir, nil +} + +func (g *HgGetter) Get(dst string, u *url.URL) error { + if _, err := exec.LookPath("hg"); err != nil { + return fmt.Errorf("hg must be available and on the PATH") + } + + newURL, err := urlhelper.Parse(u.String()) + if err != nil { + return err + } + if fixWindowsDrivePath(newURL) { + // See valid file path form on http://www.selenic.com/hg/help/urls + newURL.Path = fmt.Sprintf("/%s", newURL.Path) + } + + // Extract some query parameters we use + var rev string + q := newURL.Query() + if len(q) > 0 { + rev = q.Get("rev") + q.Del("rev") + + newURL.RawQuery = q.Encode() + } + + _, err = os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err != nil { + if err := g.clone(dst, newURL); err != nil { + return err + } + } + + if err := g.pull(dst, newURL); err != nil { + return err + } + + return g.update(dst, newURL, rev) +} + +// GetFile for Hg doesn't support updating at this time. It will download +// the file every time. +func (g *HgGetter) GetFile(dst string, u *url.URL) error { + td, err := ioutil.TempDir("", "getter-hg") + if err != nil { + return err + } + if err := os.RemoveAll(td); err != nil { + return err + } + + // Get the filename, and strip the filename from the URL so we can + // just get the repository directly. + filename := filepath.Base(u.Path) + u.Path = filepath.ToSlash(filepath.Dir(u.Path)) + + // If we're on Windows, we need to set the host to "localhost" for hg + if runtime.GOOS == "windows" { + u.Host = "localhost" + } + + // Get the full repository + if err := g.Get(td, u); err != nil { + return err + } + + // Copy the single file + u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) + if err != nil { + return err + } + + fg := &FileGetter{Copy: true} + return fg.GetFile(dst, u) +} + +func (g *HgGetter) clone(dst string, u *url.URL) error { + cmd := exec.Command("hg", "clone", "-U", u.String(), dst) + return getRunCommand(cmd) +} + +func (g *HgGetter) pull(dst string, u *url.URL) error { + cmd := exec.Command("hg", "pull") + cmd.Dir = dst + return getRunCommand(cmd) +} + +func (g *HgGetter) update(dst string, u *url.URL, rev string) error { + args := []string{"update"} + if rev != "" { + args = append(args, rev) + } + + cmd := exec.Command("hg", args...) 
+	cmd.Dir = dst
+	return getRunCommand(cmd)
+}
+
+func fixWindowsDrivePath(u *url.URL) bool {
+	// hg assumes a file:/// prefix for Windows drive letter file paths.
+	// (e.g. file:///c:/foo/bar)
+	// If the URL Path does not begin with a '/' character, the resulting URL
+	// path will have a file:// prefix. (e.g. file://c:/foo/bar)
+	// See http://www.selenic.com/hg/help/urls and the examples listed in
+	// http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936
+	return runtime.GOOS == "windows" && u.Scheme == "file" &&
+		len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':'
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go
new file mode 100644
index 00000000..661d8989
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_http.go
@@ -0,0 +1,227 @@
+package getter
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// HttpGetter is a Getter implementation that will download from an HTTP
+// endpoint.
+//
+// For file downloads, HTTP is used directly.
+//
+// The protocol for downloading a directory from an HTTP endpoint is as follows:
+//
+// An HTTP GET request is made to the URL with the additional GET parameter
+// "terraform-get=1". This lets you handle that scenario specially if you
+// wish. The response must be a 2xx.
+//
+// First, a header named "X-Terraform-Get" is looked for, which should
+// contain a source URL to download.
+//
+// If the header is not present, then a meta tag named "terraform-get" is
+// searched for, and its content should be a source URL.
+//
+// The source URL, whether from the header or meta tag, must be a fully
+// formed URL. The shorthand syntax of "github.com/foo/bar" or relative
+// paths are not allowed.
+type HttpGetter struct {
+	// Netrc, if true, will look up and use auth information found
+	// in the user's netrc file if available.
+	Netrc bool
+}
+
+func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) {
+	if strings.HasSuffix(u.Path, "/") {
+		return ClientModeDir, nil
+	}
+	return ClientModeFile, nil
+}
+
+func (g *HttpGetter) Get(dst string, u *url.URL) error {
+	// Copy the URL so we can modify it
+	var newU url.URL = *u
+	u = &newU
+
+	if g.Netrc {
+		// Add auth from netrc if we can
+		if err := addAuthFromNetrc(u); err != nil {
+			return err
+		}
+	}
+
+	// Add terraform-get to the query parameters.
+	q := u.Query()
+	q.Add("terraform-get", "1")
+	u.RawQuery = q.Encode()
+
+	// Get the URL
+	resp, err := http.Get(u.String())
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		return fmt.Errorf("bad response code: %d", resp.StatusCode)
+	}
+
+	// Extract the source URL
+	var source string
+	if v := resp.Header.Get("X-Terraform-Get"); v != "" {
+		source = v
+	} else {
+		source, err = g.parseMeta(resp.Body)
+		if err != nil {
+			return err
+		}
+	}
+	if source == "" {
+		return fmt.Errorf("no source URL was returned")
+	}
+
+	// If there is a subdir component, then we download the root separately
+	// into a temporary directory, then copy over the proper subdir.
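+	// For example, a returned source of
+	// "git::https://example.com/repo.git//subdir" fetches the repository
+	// once and then copies only "subdir" into dst (URL illustrative).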
+	source, subDir := SourceDirSubdir(source)
+	if subDir == "" {
+		return Get(dst, source)
+	}
+
+	// We have a subdir, time to jump some hoops
+	return g.getSubdir(dst, source, subDir)
+}
+
+func (g *HttpGetter) GetFile(dst string, u *url.URL) error {
+
+	if g.Netrc {
+		// Add auth from netrc if we can
+		if err := addAuthFromNetrc(u); err != nil {
+			return err
+		}
+	}
+
+	resp, err := http.Get(u.String())
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != 200 {
+		return fmt.Errorf("bad response code: %d", resp.StatusCode)
+	}
+
+	// Create all the parent directories
+	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+		return err
+	}
+
+	f, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	_, err = io.Copy(f, resp.Body)
+	return err
+}
+
+// getSubdir downloads the source into the destination, but with
+// the proper subdir.
+func (g *HttpGetter) getSubdir(dst, source, subDir string) error {
+	// Create a temporary directory to store the full source
+	td, err := ioutil.TempDir("", "tf")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(td)
+
+	// Download that into the given directory
+	if err := Get(td, source); err != nil {
+		return err
+	}
+
+	// Make sure the subdir path actually exists
+	sourcePath := filepath.Join(td, subDir)
+	if _, err := os.Stat(sourcePath); err != nil {
+		return fmt.Errorf(
+			"Error downloading %s: %s", source, err)
+	}
+
+	// Copy the subdirectory into our actual destination.
+	if err := os.RemoveAll(dst); err != nil {
+		return err
+	}
+
+	// Make the final destination
+	if err := os.MkdirAll(dst, 0755); err != nil {
+		return err
+	}
+
+	return copyDir(dst, sourcePath, false)
+}
+
+// parseMeta looks for the first meta tag in the given reader that
+// will give us the source URL.
+func (g *HttpGetter) parseMeta(r io.Reader) (string, error) {
+	d := xml.NewDecoder(r)
+	d.CharsetReader = charsetReader
+	d.Strict = false
+	var err error
+	var t xml.Token
+	for {
+		t, err = d.Token()
+		if err != nil {
+			if err == io.EOF {
+				err = nil
+			}
+			return "", err
+		}
+		if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") {
+			return "", nil
+		}
+		if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") {
+			return "", nil
+		}
+		e, ok := t.(xml.StartElement)
+		if !ok || !strings.EqualFold(e.Name.Local, "meta") {
+			continue
+		}
+		if attrValue(e.Attr, "name") != "terraform-get" {
+			continue
+		}
+		if f := attrValue(e.Attr, "content"); f != "" {
+			return f, nil
+		}
+	}
+}
+
+// attrValue returns the attribute value for the case-insensitive key
+// `name', or the empty string if nothing is found.
+func attrValue(attrs []xml.Attr, name string) string {
+	for _, a := range attrs {
+		if strings.EqualFold(a.Name.Local, name) {
+			return a.Value
+		}
+	}
+	return ""
+}
+
+// charsetReader returns a reader for the given charset. Currently
+// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful
+// error which is printed by go get, so the user can find why the package
+// wasn't downloaded if the encoding is not supported. Note that, in
+// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters
+// greater than 0x7f are not rejected).
+func charsetReader(charset string, input io.Reader) (io.Reader, error) {
+	switch strings.ToLower(charset) {
+	case "ascii":
+		return input, nil
+	default:
+		return nil, fmt.Errorf("can't decode XML document using charset %q", charset)
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/get_mock.go
new file mode 100644
index 00000000..882e694d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_mock.go
@@ -0,0 +1,52 @@
+package getter
+
+import (
+	"net/url"
+)
+
+// MockGetter is an implementation of Getter that can be used for tests.
+type MockGetter struct {
+	// Proxy, if set, will be called after recording the calls below.
+	// If it isn't set, then the *Err values will be returned.
+	Proxy Getter
+
+	GetCalled bool
+	GetDst    string
+	GetURL    *url.URL
+	GetErr    error
+
+	GetFileCalled bool
+	GetFileDst    string
+	GetFileURL    *url.URL
+	GetFileErr    error
+}
+
+func (g *MockGetter) Get(dst string, u *url.URL) error {
+	g.GetCalled = true
+	g.GetDst = dst
+	g.GetURL = u
+
+	if g.Proxy != nil {
+		return g.Proxy.Get(dst, u)
+	}
+
+	return g.GetErr
+}
+
+func (g *MockGetter) GetFile(dst string, u *url.URL) error {
+	g.GetFileCalled = true
+	g.GetFileDst = dst
+	g.GetFileURL = u
+
+	if g.Proxy != nil {
+		return g.Proxy.GetFile(dst, u)
+	}
+	return g.GetFileErr
+}
+
+func (g *MockGetter) ClientMode(u *url.URL) (ClientMode, error) {
+	if l := len(u.Path); l > 0 && u.Path[l-1:] == "/" {
+		return ClientModeDir, nil
+	}
+	return ClientModeFile, nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go
new file mode 100644
index 00000000..d3bffeb1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_s3.go
@@ -0,0 +1,243 @@
+package getter
+
+import (
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+)
+
+// S3Getter is a Getter implementation that will download a module from
+// an S3 bucket.
+type S3Getter struct{}
+
+func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) {
+	// Parse URL
+	region, bucket, path, _, creds, err := g.parseUrl(u)
+	if err != nil {
+		return 0, err
+	}
+
+	// Create client config
+	config := g.getAWSConfig(region, creds)
+	sess := session.New(config)
+	client := s3.New(sess)
+
+	// List the object(s) at the given prefix
+	req := &s3.ListObjectsInput{
+		Bucket: aws.String(bucket),
+		Prefix: aws.String(path),
+	}
+	resp, err := client.ListObjects(req)
+	if err != nil {
+		return 0, err
+	}
+
+	for _, o := range resp.Contents {
+		// Use file mode on exact match.
+		if *o.Key == path {
+			return ClientModeFile, nil
+		}
+
+		// Use dir mode if child keys are found.
+		if strings.HasPrefix(*o.Key, path+"/") {
+			return ClientModeDir, nil
+		}
+	}
+
+	// There was no match, so just return file mode. The download is going
+	// to fail but we will let S3 return the proper error later.
+	return ClientModeFile, nil
+}
+
+func (g *S3Getter) Get(dst string, u *url.URL) error {
+	// Parse URL
+	region, bucket, path, _, creds, err := g.parseUrl(u)
+	if err != nil {
+		return err
+	}
+
+	// Remove destination if it already exists
+	_, err = os.Stat(dst)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	if err == nil {
+		// Remove the destination
+		if err := os.RemoveAll(dst); err != nil {
+			return err
+		}
+	}
+
+	// Create all the parent directories
+	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+		return err
+	}
+
+	config := g.getAWSConfig(region, creds)
+	sess := session.New(config)
+	client := s3.New(sess)
+
+	// List files in path, keep listing until no more objects are found
+	lastMarker := ""
+	hasMore := true
+	for hasMore {
+		req := &s3.ListObjectsInput{
+			Bucket: aws.String(bucket),
+			Prefix: aws.String(path),
+		}
+		if lastMarker != "" {
+			req.Marker = aws.String(lastMarker)
+		}
+
+		resp, err := client.ListObjects(req)
+		if err != nil {
+			return err
+		}
+
+		hasMore = aws.BoolValue(resp.IsTruncated)
+
+		// Get each object storing each file relative to the destination path
+		for _, object := range resp.Contents {
+			lastMarker = aws.StringValue(object.Key)
+			objPath := aws.StringValue(object.Key)
+
+			// If the key ends with a slash, assume it is a directory and ignore it
+			if strings.HasSuffix(objPath, "/") {
+				continue
+			}
+
+			// Get the object destination path
+			objDst, err := filepath.Rel(path, objPath)
+			if err != nil {
+				return err
+			}
+			objDst = filepath.Join(dst, objDst)
+
+			if err := g.getObject(client, objDst, bucket, objPath, ""); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (g *S3Getter) GetFile(dst string, u *url.URL) error {
+	region, bucket, path, version, creds, err := g.parseUrl(u)
+	if err != nil {
+		return err
+	}
+
+	config := g.getAWSConfig(region, creds)
+	sess := session.New(config)
+	client := s3.New(sess)
+	return g.getObject(client, dst, bucket, path, version)
+}
+
+func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) error {
+	req := &s3.GetObjectInput{
+		Bucket: aws.String(bucket),
+		Key:    aws.String(key),
+	}
+	if version != "" {
+		req.VersionId = aws.String(version)
+	}
+
+	resp, err := client.GetObject(req)
+	if err != nil {
+		return err
+	}
+
+	// Create all the parent directories
+	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+		return err
+	}
+
+	f, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	_, err = io.Copy(f, resp.Body)
+	return err
+}
+
+func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *aws.Config {
+	conf := &aws.Config{}
+	if creds == nil {
+		// Grab the metadata URL
+		metadataURL := os.Getenv("AWS_METADATA_URL")
+		if metadataURL == "" {
+			metadataURL = "http://169.254.169.254:80/latest"
+		}
+
+		creds = credentials.NewChainCredentials(
+			[]credentials.Provider{
+				&credentials.EnvProvider{},
+				&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+				&ec2rolecreds.EC2RoleProvider{
+					Client: ec2metadata.New(session.New(&aws.Config{
+						Endpoint: aws.String(metadataURL),
+					})),
+				},
+			})
+	}
+
+	conf.Credentials = creds
+	if region != "" {
+		conf.Region = aws.String(region)
+	}
+
+	return conf
+}
+
+func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) {
+	// Expected host style: s3.amazonaws.com. They always have 3 parts,
+	// although the first may differ if we're accessing a specific region.
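+	//
+	// For example (editorial illustration; bucket and key are hypothetical):
+	//
+	//	https://s3.amazonaws.com/my-bucket/path/to/key            -> region "us-east-1"
+	//	https://s3-eu-west-1.amazonaws.com/my-bucket/path/to/key  -> region "eu-west-1"
+	//
+	// A "?version=..." query selects a specific object version, and static
+	// credentials may be supplied via the aws_access_key_id,
+	// aws_access_key_secret and aws_access_token query parameters (see below).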
+	hostParts := strings.Split(u.Host, ".")
+	if len(hostParts) != 3 {
+		err = fmt.Errorf("URL is not a valid S3 URL")
+		return
+	}
+
+	// Parse the region out of the first part of the host
+	region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3")
+	if region == "" {
+		region = "us-east-1"
+	}
+
+	pathParts := strings.SplitN(u.Path, "/", 3)
+	if len(pathParts) != 3 {
+		err = fmt.Errorf("URL is not a valid S3 URL")
+		return
+	}
+
+	bucket = pathParts[1]
+	path = pathParts[2]
+	version = u.Query().Get("version")
+
+	_, hasAwsId := u.Query()["aws_access_key_id"]
+	_, hasAwsSecret := u.Query()["aws_access_key_secret"]
+	_, hasAwsToken := u.Query()["aws_access_token"]
+	if hasAwsId || hasAwsSecret || hasAwsToken {
+		creds = credentials.NewStaticCredentials(
+			u.Query().Get("aws_access_key_id"),
+			u.Query().Get("aws_access_key_secret"),
+			u.Query().Get("aws_access_token"),
+		)
+	}
+
+	return
+}
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url.go b/vendor/github.com/hashicorp/go-getter/helper/url/url.go
new file mode 100644
index 00000000..02497c25
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/helper/url/url.go
@@ -0,0 +1,14 @@
+package url
+
+import (
+	"net/url"
+)
+
+// Parse parses rawURL into a URL structure.
+// The rawURL may be relative or absolute.
+//
+// Parse is a wrapper for the Go stdlib net/url Parse function, but returns
+// Windows "safe" URLs on Windows platforms.
+func Parse(rawURL string) (*url.URL, error) {
+	return parse(rawURL)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go
new file mode 100644
index 00000000..ed1352a9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package url
+
+import (
+	"net/url"
+)
+
+func parse(rawURL string) (*url.URL, error) {
+	return url.Parse(rawURL)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go
new file mode 100644
index 00000000..4655226f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go
@@ -0,0 +1,40 @@
+package url
+
+import (
+	"fmt"
+	"net/url"
+	"path/filepath"
+	"strings"
+)
+
+func parse(rawURL string) (*url.URL, error) {
+	// Make sure we're using "/" since URLs are "/"-based.
+	rawURL = filepath.ToSlash(rawURL)
+
+	u, err := url.Parse(rawURL)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(rawURL) > 1 && rawURL[1] == ':' {
+		// Assume we're dealing with a drive letter file path where the drive
+		// letter has been parsed into the URL Scheme, and the rest of the path
+		// has been parsed into the URL Path without the leading ':' character.
+		u.Path = fmt.Sprintf("%s:%s", string(rawURL[0]), u.Path)
+		u.Scheme = ""
+	}
+
+	if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") {
+		// Assume we're dealing with a drive letter file path where the drive
+		// letter has been parsed into the URL Host.
+		u.Path = fmt.Sprintf("%s%s", u.Host, u.Path)
+		u.Host = ""
+	}
+
+	// Remove leading slash for absolute file paths.
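+	// For example (editorial illustration), "/C:/Users/foo" becomes
+	// "C:/Users/foo".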
+	if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' {
+		u.Path = u.Path[1:]
+	}
+
+	return u, err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/netrc.go b/vendor/github.com/hashicorp/go-getter/netrc.go
new file mode 100644
index 00000000..c7f6a3fb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/netrc.go
@@ -0,0 +1,67 @@
+package getter
+
+import (
+	"fmt"
+	"net/url"
+	"os"
+	"runtime"
+
+	"github.com/bgentry/go-netrc/netrc"
+	"github.com/mitchellh/go-homedir"
+)
+
+// addAuthFromNetrc adds auth information to the URL from the user's
+// netrc file if it can be found. This will only add the auth info
+// if the URL doesn't already have auth info specified and the
+// username is blank.
+func addAuthFromNetrc(u *url.URL) error {
+	// If the URL already has auth information, do nothing
+	if u.User != nil && u.User.Username() != "" {
+		return nil
+	}
+
+	// Get the netrc file path
+	path := os.Getenv("NETRC")
+	if path == "" {
+		filename := ".netrc"
+		if runtime.GOOS == "windows" {
+			filename = "_netrc"
+		}
+
+		var err error
+		path, err = homedir.Expand("~/" + filename)
+		if err != nil {
+			return err
+		}
+	}
+
+	// If the file is not a file, then do nothing
+	if fi, err := os.Stat(path); err != nil {
+		// File doesn't exist, do nothing
+		if os.IsNotExist(err) {
+			return nil
+		}
+
+		// Some other error!
+		return err
+	} else if fi.IsDir() {
+		// File is directory, ignore
+		return nil
+	}
+
+	// Load up the netrc file
+	net, err := netrc.ParseFile(path)
+	if err != nil {
+		return fmt.Errorf("Error parsing netrc file at %q: %s", path, err)
+	}
+
+	machine := net.FindMachine(u.Host)
+	if machine == nil {
+		// Machine not found, no problem
+		return nil
+	}
+
+	// Set the user info
+	u.User = url.UserPassword(machine.Login, machine.Password)
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go
new file mode 100644
index 00000000..4d5ee3cc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/source.go
@@ -0,0 +1,36 @@
+package getter
+
+import (
+	"strings"
+)
+
+// SourceDirSubdir takes a source and returns a tuple of the URL without
+// the subdir and the subdir itself.
+func SourceDirSubdir(src string) (string, string) {
+	// Calculate an offset to avoid accidentally marking the scheme
+	// as the dir.
+	var offset int
+	if idx := strings.Index(src, "://"); idx > -1 {
+		offset = idx + 3
+	}
+
+	// First see if we even have an explicit subdir
+	idx := strings.Index(src[offset:], "//")
+	if idx == -1 {
+		return src, ""
+	}
+
+	idx += offset
+	subdir := src[idx+2:]
+	src = src[:idx]
+
+	// Next, check if we have query parameters and push them onto the
+	// URL.
+	if idx = strings.Index(subdir, "?"); idx > -1 {
+		query := subdir[idx:]
+		subdir = subdir[:idx]
+		src += query
+	}
+
+	return src, subdir
+}
diff --git a/vendor/github.com/hashicorp/go-getter/storage.go b/vendor/github.com/hashicorp/go-getter/storage.go
new file mode 100644
index 00000000..2bc6b9ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/storage.go
@@ -0,0 +1,13 @@
+package getter
+
+// Storage is an interface that knows how to lookup downloaded directories
+// as well as download and update directories from their sources into the
+// proper location.
+type Storage interface {
+	// Dir returns the directory on local disk where the directory source
+	// can be loaded from.
+	Dir(string) (string, bool, error)
+
+	// Get will download and optionally update the given directory.
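+	// (Editorial note: in the implementations shipped with go-getter, such
+	// as FolderStorage, the arguments are a storage key, the source URL,
+	// and whether to update an existing copy.)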
+ Get(string, string, bool) error +} diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE new file mode 100644 index 00000000..82b4de97 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. 
under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. 
+ Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. 
Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md new file mode 100644 index 00000000..ead5830f --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -0,0 +1,97 @@ +# go-multierror + +[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: https://travis-ci.org/hashicorp/go-multierror +[godocs]: https://godoc.org/github.com/hashicorp/go-multierror + +`go-multierror` is a package for Go that provides a mechanism for +representing a list of `error` values as a single `error`. + +This allows a function in Go to return an `error` that might actually +be a list of errors. If the caller knows this, they can unwrap the +list and access the errors. If the caller doesn't know, the error +formats to a nice human-readable format. + +`go-multierror` implements the +[errwrap](https://github.com/hashicorp/errwrap) interface so that it can +be used with that library, as well. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-multierror`. 
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/go-multierror
+
+## Usage
+
+go-multierror is easy to use and purposely built to be unobtrusive in
+existing Go applications/libraries that may not be aware of it.
+
+**Building a list of errors**
+
+The `Append` function is used to create a list of errors. This function
+behaves a lot like the Go built-in `append` function: it doesn't matter
+if the first argument is nil, a `multierror.Error`, or any other `error`,
+the function behaves as you would expect.
+
+```go
+var result error
+
+if err := step1(); err != nil {
+    result = multierror.Append(result, err)
+}
+if err := step2(); err != nil {
+    result = multierror.Append(result, err)
+}
+
+return result
+```
+
+**Customizing the formatting of the errors**
+
+By specifying a custom `ErrorFormat`, you can customize the format
+of the `Error() string` function:
+
+```go
+var result *multierror.Error
+
+// ... accumulate errors here, maybe using Append
+
+if result != nil {
+    result.ErrorFormat = func([]error) string {
+        return "errors!"
+    }
+}
+```
+
+**Accessing the list of errors**
+
+`multierror.Error` implements `error` so if the caller doesn't know about
+multierror, it will work just fine. But if you're aware a multierror might
+be returned, you can use type switches to access the list of errors:
+
+```go
+if err := something(); err != nil {
+    if merr, ok := err.(*multierror.Error); ok {
+        // Use merr.Errors
+    }
+}
+```
+
+**Returning a multierror only if there are errors**
+
+If you build a `multierror.Error`, you can use the `ErrorOrNil` function
+to return an `error` implementation only if there are errors to return:
+
+```go
+var result *multierror.Error
+
+// ... accumulate errors here
+
+// Return the `error` only if errors were added to the multierror, otherwise
+// return nil since there are no errors.
+return result.ErrorOrNil()
+```
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
new file mode 100644
index 00000000..775b6e75
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/append.go
@@ -0,0 +1,41 @@
+package multierror
+
+// Append is a helper function that will append more errors
+// onto an Error in order to create a larger multi-error.
+//
+// If err is not a multierror.Error, then it will be turned into
+// one. If any of the errs are multierror.Error, they will be flattened
+// one level into err.
+func Append(err error, errs ...error) *Error {
+	switch err := err.(type) {
+	case *Error:
+		// Typed nils can reach here, so initialize if we are nil
+		if err == nil {
+			err = new(Error)
+		}
+
+		// Go through each error and flatten
+		for _, e := range errs {
+			switch e := e.(type) {
+			case *Error:
+				if e != nil {
+					err.Errors = append(err.Errors, e.Errors...)
+				}
+			default:
+				if e != nil {
+					err.Errors = append(err.Errors, e)
+				}
+			}
+		}
+
+		return err
+	default:
+		newErrs := make([]error, 0, len(errs)+1)
+		if err != nil {
+			newErrs = append(newErrs, err)
+		}
+		newErrs = append(newErrs, errs...)
+
+		return Append(&Error{}, newErrs...)
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
new file mode 100644
index 00000000..aab8e9ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/flatten.go
@@ -0,0 +1,26 @@
+package multierror
+
+// Flatten flattens the given error, merging any *Errors together into
+// a single *Error.
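+//
+// For example (editorial illustration):
+//
+//	e := &Error{Errors: []error{err1, &Error{Errors: []error{err2, err3}}}}
+//	flat := Flatten(e) // flat.(*Error).Errors == [err1, err2, err3]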
+func Flatten(err error) error {
+	// If it isn't an *Error, just return the error as-is
+	if _, ok := err.(*Error); !ok {
+		return err
+	}
+
+	// Otherwise, make the result and flatten away!
+	flatErr := new(Error)
+	flatten(err, flatErr)
+	return flatErr
+}
+
+func flatten(err error, flatErr *Error) {
+	switch err := err.(type) {
+	case *Error:
+		for _, e := range err.Errors {
+			flatten(e, flatErr)
+		}
+	default:
+		flatErr.Errors = append(flatErr.Errors, err)
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
new file mode 100644
index 00000000..6c7a3cc9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -0,0 +1,27 @@
+package multierror
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ErrorFormatFunc is a function callback that is called by Error to
+// turn the list of errors into a string.
+type ErrorFormatFunc func([]error) string
+
+// ListFormatFunc is a basic formatter that outputs the number of errors
+// that occurred along with a bullet point list of the errors.
+func ListFormatFunc(es []error) string {
+	if len(es) == 1 {
+		return fmt.Sprintf("1 error occurred:\n\n* %s", es[0])
+	}
+
+	points := make([]string, len(es))
+	for i, err := range es {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	return fmt.Sprintf(
+		"%d errors occurred:\n\n%s",
+		len(es), strings.Join(points, "\n"))
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
new file mode 100644
index 00000000..2ea08273
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -0,0 +1,51 @@
+package multierror
+
+import (
+	"fmt"
+)
+
+// Error is an error type to track multiple errors. This is used to
+// accumulate errors and return them as a single "error".
+type Error struct {
+	Errors      []error
+	ErrorFormat ErrorFormatFunc
+}
+
+func (e *Error) Error() string {
+	fn := e.ErrorFormat
+	if fn == nil {
+		fn = ListFormatFunc
+	}
+
+	return fn(e.Errors)
+}
+
+// ErrorOrNil returns an error interface if this Error represents
+// a list of errors, or returns nil if the list of errors is empty. This
+// function is useful at the end of accumulation to make sure that the value
+// returned represents the existence of errors.
+func (e *Error) ErrorOrNil() error {
+	if e == nil {
+		return nil
+	}
+	if len(e.Errors) == 0 {
+		return nil
+	}
+
+	return e
+}
+
+func (e *Error) GoString() string {
+	return fmt.Sprintf("*%#v", *e)
+}
+
+// WrappedErrors returns the list of errors that this Error is wrapping.
+// It is an implementation of the errwrap.Wrapper interface so that
+// multierror.Error can be used with that library.
+//
+// This method is not safe to be called concurrently and is no different
+// than accessing the Errors field directly. It is implemented only to
+// satisfy the errwrap.Wrapper interface.
+func (e *Error) WrappedErrors() []error {
+	return e.Errors
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go
new file mode 100644
index 00000000..5c477abe
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/prefix.go
@@ -0,0 +1,37 @@
+package multierror
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/errwrap"
+)
+
+// Prefix is a helper function that will prefix some text
+// to the given error. If the error is a multierror.Error, then
+// it will be prefixed to each wrapped error.
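+// For example (editorial illustration), Prefix(err, "storage:") rewrites
+// each wrapped error message to "storage: <original message>".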
+// +// This is useful to use when appending multiple multierrors +// together in order to give better scoping. +func Prefix(err error, prefix string) error { + if err == nil { + return nil + } + + format := fmt.Sprintf("%s {{err}}", prefix) + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Wrap each of the errors + for i, e := range err.Errors { + err.Errors[i] = errwrap.Wrapf(format, e) + } + + return err + default: + return errwrap.Wrapf(format, err) + } +} diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md new file mode 100644 index 00000000..02565c8c --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/README.md @@ -0,0 +1,8 @@ +# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid) + +Generates UUID-format strings using high quality, purely random bytes. It can also parse UUID-format strings into their component bytes. + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid). diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go new file mode 100644 index 00000000..ff9364c4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/uuid.go @@ -0,0 +1,65 @@ +package uuid + +import ( + "crypto/rand" + "encoding/hex" + "fmt" +) + +// GenerateRandomBytes is used to generate random bytes of given size. 
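+//
+// For example (editorial illustration):
+//
+//	key, err := GenerateRandomBytes(32) // 32 bytes from crypto/rand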
+func GenerateRandomBytes(size int) ([]byte, error) {
+	buf := make([]byte, size)
+	if _, err := rand.Read(buf); err != nil {
+		return nil, fmt.Errorf("failed to read random bytes: %v", err)
+	}
+	return buf, nil
+}
+
+// GenerateUUID is used to generate a random UUID
+func GenerateUUID() (string, error) {
+	buf, err := GenerateRandomBytes(16)
+	if err != nil {
+		return "", err
+	}
+	return FormatUUID(buf)
+}
+
+// FormatUUID formats a 16-byte slice as a canonical, hyphenated,
+// 36-character UUID string.
+func FormatUUID(buf []byte) (string, error) {
+	if len(buf) != 16 {
+		return "", fmt.Errorf("wrong length byte slice (%d)", len(buf))
+	}
+
+	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
+		buf[0:4],
+		buf[4:6],
+		buf[6:8],
+		buf[8:10],
+		buf[10:16]), nil
+}
+
+// ParseUUID parses a canonical, hyphenated UUID string back into its
+// 16 component bytes.
+func ParseUUID(uuid string) ([]byte, error) {
+	if len(uuid) != 36 {
+		return nil, fmt.Errorf("uuid string is wrong length")
+	}
+
+	hyph := []byte("-")
+
+	if uuid[8] != hyph[0] ||
+		uuid[13] != hyph[0] ||
+		uuid[18] != hyph[0] ||
+		uuid[23] != hyph[0] {
+		return nil, fmt.Errorf("uuid is improperly formatted")
+	}
+
+	hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36]
+
+	ret, err := hex.DecodeString(hexStr)
+	if err != nil {
+		return nil, err
+	}
+	if len(ret) != 16 {
+		return nil, fmt.Errorf("decoded hex is the wrong length")
+	}
+
+	return ret, nil
+}
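Taken together, the three exported helpers above round-trip cleanly. A minimal usage sketch (not part of the vendored file; it assumes only the functions shown):

```go
package main

import (
	"fmt"

	uuid "github.com/hashicorp/go-uuid"
)

func main() {
	// Generate a random, canonical UUID string.
	id, err := uuid.GenerateUUID()
	if err != nil {
		panic(err)
	}

	// Round-trip: parse back to the 16 raw bytes, then re-format.
	raw, err := uuid.ParseUUID(id)
	if err != nil {
		panic(err)
	}
	again, _ := uuid.FormatUUID(raw)
	fmt.Println(id, id == again) // prints the UUID and "true"
}
```

diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11.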
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md
new file mode 100644
index 00000000..6f3a15ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/README.md
@@ -0,0 +1,65 @@
+# Versioning Library for Go
+[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version)
+
+go-version is a library for parsing versions and version constraints,
+and verifying versions against a set of constraints. go-version
+can sort a collection of versions properly, handles prerelease/beta
+versions, can increment versions, etc.
+
+Versions used with go-version must follow [SemVer](http://semver.org/).
+
+## Installation and Usage
+
+Package documentation can be found on
+[GoDoc](http://godoc.org/github.com/hashicorp/go-version).
+
+Installation can be done with a normal `go get`:
+
+```
+$ go get github.com/hashicorp/go-version
+```
+
+#### Version Parsing and Comparison
+
+```go
+v1, err := version.NewVersion("1.2")
+v2, err := version.NewVersion("1.5+metadata")
+
+// Comparison example. There is also GreaterThan, Equal, and just
+// a simple Compare that returns an int allowing easy >=, <=, etc.
+if v1.LessThan(v2) {
+  fmt.Printf("%s is less than %s", v1, v2)
+}
+```
+
+#### Version Constraints
+
+```go
+v1, err := version.NewVersion("1.2")
+
+// Constraints example.
+constraints, err := version.NewConstraint(">= 1.0, < 1.4")
+if constraints.Check(v1) {
+  fmt.Printf("%s satisfies constraints %s", v1, constraints)
+}
+```
+
+#### Version Sorting
+
+```go
+versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"}
+versions := make([]*version.Version, len(versionsRaw))
+for i, raw := range versionsRaw {
+  v, _ := version.NewVersion(raw)
+  versions[i] = v
+}
+
+// After this, the versions are properly sorted
+sort.Sort(version.Collection(versions))
+```
+
+## Issues and Contributing
+
+If you find an issue with this library, please open an issue. Contributions
+are welcome: fork this library and submit a pull request.
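Beyond the operators shown in the README, the constraint parser vendored next also accepts a pessimistic operator, `~>`, which the README does not document. A small snippet-style sketch of its behavior, inferred from `constraintPessimistic` in the file that follows (following the README's snippet conventions; not an official example):

```go
// "~> 1.2" keeps the leading segments fixed: it allows any 1.x release
// at or above 1.2, while "~> 1.2.3" would allow 1.2.x at or above 1.2.3.
c, err := version.NewConstraint("~> 1.2")
if err != nil {
	panic(err)
}
fmt.Println(c.Check(version.Must(version.NewVersion("1.7.3")))) // true
fmt.Println(c.Check(version.Must(version.NewVersion("2.0.0")))) // false
```

diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go
new file mode 100644
index 00000000..8c73df06
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/constraint.go
@@ -0,0 +1,178 @@
+package version
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// Constraint represents a single constraint for a version, such as
+// ">= 1.0".
+type Constraint struct {
+	f        constraintFunc
+	check    *Version
+	original string
+}
+
+// Constraints is a slice of constraints. We make a custom type so that
+// we can add methods to it.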
+type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintFunc + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintFunc{ + "": constraintEqual, + "=": constraintEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "~>": constraintPessimistic, + } + + ops := make([]string, 0, len(constraintOperators)) + for k := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// Check tests if a version satisfies all the constraints. +func (cs Constraints) Check(v *Version) bool { + for _, c := range cs { + if !c.Check(v) { + return false + } + } + + return true +} + +// Returns the string format of the constraints +func (cs Constraints) String() string { + csStr := make([]string, len(cs)) + for i, c := range cs { + csStr[i] = c.String() + } + + return strings.Join(csStr, ",") +} + +// Check tests if a constraint is validated by the given version. +func (c *Constraint) Check(v *Version) bool { + return c.f(v, c.check) +} + +func (c *Constraint) String() string { + return c.original +} + +func parseSingle(v string) (*Constraint, error) { + matches := constraintRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed constraint: %s", v) + } + + check, err := NewVersion(matches[2]) + if err != nil { + return nil, err + } + + return &Constraint{ + f: constraintOperators[matches[1]], + check: check, + original: v, + }, nil +} + +//------------------------------------------------------------------- +// Constraint functions +//------------------------------------------------------------------- + +func constraintEqual(v, c *Version) bool { + return v.Equal(c) +} + +func constraintNotEqual(v, c *Version) bool { + return !v.Equal(c) +} + +func constraintGreaterThan(v, c *Version) bool { + return v.Compare(c) == 1 +} + +func constraintLessThan(v, c *Version) bool { + return v.Compare(c) == -1 +} + +func constraintGreaterThanEqual(v, c *Version) bool { + return v.Compare(c) >= 0 +} + +func constraintLessThanEqual(v, c *Version) bool { + return v.Compare(c) <= 0 +} + +func constraintPessimistic(v, c *Version) bool { + // If the version being checked is naturally less than the constraint, then there + // is no way for the version to be valid against the constraint + if v.LessThan(c) { + return false + } + // We'll use this more than once, so grab the length now so it's a little cleaner + // to write the later checks + cs := len(c.segments) + + // If the version being checked has less specificity than the constraint, then there + // is no way for the version to be valid against the constraint + if cs > len(v.segments) { + return false + } + + // Check the segments in the constraint against those in the version. 
If the version + // being checked, at any point, does not have the same values in each index of the + // constraints segments, then it cannot be valid against the constraint. + for i := 0; i < c.si-1; i++ { + if v.segments[i] != c.segments[i] { + return false + } + } + + // Check the last part of the segment in the constraint. If the version segment at + // this index is less than the constraints segment at this index, then it cannot + // be valid against the constraint + if c.segments[cs-1] > v.segments[cs-1] { + return false + } + + // If nothing has rejected the version by now, it's valid + return true +} diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go new file mode 100644 index 00000000..dfe509ca --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -0,0 +1,322 @@ +package version + +import ( + "bytes" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// The compiled regular expression used to test the validity of a version. +var versionRegexp *regexp.Regexp + +// The raw regular expression string used for testing the validity +// of a version. +const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-?([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `?` + +// Version represents a single version. +type Version struct { + metadata string + pre string + segments []int64 + si int +} + +func init() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") +} + +// NewVersion parses the given version and returns a new +// Version. +func NewVersion(v string) (*Version, error) { + matches := versionRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed version: %s", v) + } + segmentsStr := strings.Split(matches[1], ".") + segments := make([]int64, len(segmentsStr)) + si := 0 + for i, str := range segmentsStr { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return nil, fmt.Errorf( + "Error parsing version: %s", err) + } + + segments[i] = int64(val) + si++ + } + + // Even though we could support more than three segments, if we + // got less than three, pad it with 0s. This is to cover the basic + // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum + for i := len(segments); i < 3; i++ { + segments = append(segments, 0) + } + + return &Version{ + metadata: matches[7], + pre: matches[4], + segments: segments, + si: si, + }, nil +} + +// Must is a helper that wraps a call to a function returning (*Version, error) +// and panics if error is non-nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + + return v +} + +// Compare compares this version to another version. This +// returns -1, 0, or 1 if this version is smaller, equal, +// or larger than the other version, respectively. +// +// If you want boolean results, use the LessThan, Equal, +// or GreaterThan methods. 
+func (v *Version) Compare(other *Version) int {
+	// A quick, efficient equality check
+	if v.String() == other.String() {
+		return 0
+	}
+
+	segmentsSelf := v.Segments64()
+	segmentsOther := other.Segments64()
+
+	// If the segments are the same, we must compare on prerelease info
+	if reflect.DeepEqual(segmentsSelf, segmentsOther) {
+		preSelf := v.Prerelease()
+		preOther := other.Prerelease()
+		if preSelf == "" && preOther == "" {
+			return 0
+		}
+		if preSelf == "" {
+			return 1
+		}
+		if preOther == "" {
+			return -1
+		}
+
+		return comparePrereleases(preSelf, preOther)
+	}
+
+	// Get the highest specificity (hS), or if they're equal, just use segmentSelf length
+	lenSelf := len(segmentsSelf)
+	lenOther := len(segmentsOther)
+	hS := lenSelf
+	if lenSelf < lenOther {
+		hS = lenOther
+	}
+	// Compare the segments
+	// Because a constraint could have more/less specificity than the version it's
+	// checking, we need to account for a lopsided or jagged comparison
+	for i := 0; i < hS; i++ {
+		if i > lenSelf-1 {
+			// This means Self had the lower specificity
+			// Check to see if the remaining segments in Other are all zeros
+			if !allZero(segmentsOther[i:]) {
+				// if not, it means that Other has to be greater than Self
+				return -1
+			}
+			break
+		} else if i > lenOther-1 {
+			// this means Other had the lower specificity
+			// Check to see if the remaining segments in Self are all zeros -
+			if !allZero(segmentsSelf[i:]) {
+				// if not, it means that Self has to be greater than Other
+				return 1
+			}
+			break
+		}
+		lhs := segmentsSelf[i]
+		rhs := segmentsOther[i]
+		if lhs == rhs {
+			continue
+		} else if lhs < rhs {
+			return -1
+		}
+		// Otherwise rhs was > lhs; they're not equal
+		return 1
+	}
+
+	// if we got this far, they're equal
+	return 0
+}
+
+func allZero(segs []int64) bool {
+	for _, s := range segs {
+		if s != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func comparePart(preSelf string, preOther string) int {
+	if preSelf == preOther {
+		return 0
+	}
+
+	selfNumeric := true
+	_, err := strconv.ParseInt(preSelf, 10, 64)
+	if err != nil {
+		selfNumeric = false
+	}
+
+	otherNumeric := true
+	_, err = strconv.ParseInt(preOther, 10, 64)
+	if err != nil {
+		otherNumeric = false
+	}
+
+	// if a part is empty, we use the other to decide
+	if preSelf == "" {
+		if otherNumeric {
+			return -1
+		}
+		return 1
+	}
+
+	if preOther == "" {
+		if selfNumeric {
+			return 1
+		}
+		return -1
+	}
+
+	if selfNumeric && !otherNumeric {
+		return -1
+	} else if !selfNumeric && otherNumeric {
+		return 1
+	} else if preSelf > preOther {
+		return 1
+	}
+
+	return -1
+}
+
+func comparePrereleases(v string, other string) int {
+	// the same prerelease!
+	if v == other {
+		return 0
+	}
+
+	// split both prereleases to analyze their parts
+	selfPreReleaseMeta := strings.Split(v, ".")
+	otherPreReleaseMeta := strings.Split(other, ".")
+
+	selfPreReleaseLen := len(selfPreReleaseMeta)
+	otherPreReleaseLen := len(otherPreReleaseMeta)
+
+	biggestLen := otherPreReleaseLen
+	if selfPreReleaseLen > otherPreReleaseLen {
+		biggestLen = selfPreReleaseLen
+	}
+
+	// loop over the parts to find the first difference
+	for i := 0; i < biggestLen; i = i + 1 {
+		partSelfPre := ""
+		if i < selfPreReleaseLen {
+			partSelfPre = selfPreReleaseMeta[i]
+		}
+
+		partOtherPre := ""
+		if i < otherPreReleaseLen {
+			partOtherPre = otherPreReleaseMeta[i]
+		}
+
+		compare := comparePart(partSelfPre, partOtherPre)
+		// if the parts are equal, continue the loop
+		if compare != 0 {
+			return compare
+		}
+	}
+
+	return 0
+}
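The ordering rules implemented by comparePart and comparePrereleases are easiest to read off the public API. A snippet-style sketch (behavior inferred from the code above, not from upstream documentation):

```go
a := version.Must(version.NewVersion("1.4.0-beta"))
b := version.Must(version.NewVersion("1.4.0"))
fmt.Println(a.LessThan(b)) // true: a prerelease sorts before the bare release

// Numeric prerelease identifiers sort before alphanumeric ones.
p1 := version.Must(version.NewVersion("1.4.0-2"))
p2 := version.Must(version.NewVersion("1.4.0-alpha"))
fmt.Println(p1.LessThan(p2)) // true

// NewVersion pads short versions to three segments, so "1.2" equals "1.2.0".
fmt.Println(version.Must(version.NewVersion("1.2")).Equal(
	version.Must(version.NewVersion("1.2.0")))) // true
```

+// Equal tests if two versions are equal.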
+func (v *Version) Equal(o *Version) bool {
+	return v.Compare(o) == 0
+}
+
+// GreaterThan tests if this version is greater than another version.
+func (v *Version) GreaterThan(o *Version) bool {
+	return v.Compare(o) > 0
+}
+
+// LessThan tests if this version is less than another version.
+func (v *Version) LessThan(o *Version) bool {
+	return v.Compare(o) < 0
+}
+
+// Metadata returns any metadata that was part of the version
+// string.
+//
+// Metadata is anything that comes after the "+" in the version.
+// For example, with "1.2.3+beta", the metadata is "beta".
+func (v *Version) Metadata() string {
+	return v.metadata
+}
+
+// Prerelease returns any prerelease data that is part of the version,
+// or blank if there is no prerelease data.
+//
+// Prerelease information is anything that comes after the "-" in the
+// version (but before any metadata). For example, with "1.2.3-beta",
+// the prerelease information is "beta".
+func (v *Version) Prerelease() string {
+	return v.pre
+}
+
+// Segments returns the numeric segments of the version as a slice of ints.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments() []int {
+	segmentSlice := make([]int, len(v.segments))
+	for i, s := range v.segments {
+		segmentSlice[i] = int(s)
+	}
+	return segmentSlice
+}
+
+// Segments64 returns the numeric segments of the version as a slice of int64s.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments64() []int64 {
+	return v.segments
+}
+
+// String returns the full version string, including pre-release
+// and metadata information.
+func (v *Version) String() string {
+	var buf bytes.Buffer
+	fmtParts := make([]string, len(v.segments))
+	for i, s := range v.segments {
+		// FormatInt cannot fail here; the segments were already parsed as int64s
+		str := strconv.FormatInt(s, 10)
+		fmtParts[i] = str
+	}
+	buf.WriteString(strings.Join(fmtParts, "."))
+	if v.pre != "" {
+		fmt.Fprintf(&buf, "-%s", v.pre)
+	}
+	if v.metadata != "" {
+		fmt.Fprintf(&buf, "+%s", v.metadata)
+	}
+
+	return buf.String()
+}
diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go
new file mode 100644
index 00000000..cc888d43
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/version_collection.go
@@ -0,0 +1,17 @@
+package version
+
+// Collection is a type that implements the sort.Interface interface
+// so that versions can be sorted.
+type Collection []*Version
+
+func (v Collection) Len() int {
+	return len(v)
+}
+
+func (v Collection) Less(i, j int) bool {
+	return v[i].LessThan(v[j])
+}
+
+func (v Collection) Swap(i, j int) {
+	v[i], v[j] = v[j], v[i]
+}
diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3.
“Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile
new file mode 100644
index 00000000..84fd743f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/Makefile
@@ -0,0 +1,18 @@
+TEST?=./...
+
+default: test
+
+fmt: generate
+	go fmt ./...
+
+test: generate
+	go get -t ./...
+	go test $(TEST) $(TESTARGS)
+
+generate:
+	go generate ./...
+
+updatedeps:
+	go get -u golang.org/x/tools/cmd/stringer
+
+.PHONY: default generate test updatedeps
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644
index 00000000..c8223326
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/README.md
@@ -0,0 +1,125 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human and machine friendly for use with command-line tools, but
+specifically targeted towards DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make systems
+interoperable with other systems.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and other similar formats.
+
+## Why?
+
+A common question about HCL is: why not JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages from full programming languages
+such as Ruby to complete data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON fits a nice balance in this, but is fairly verbose and most
+importantly doesn't support comments.
+With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+in order to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+a configuration language shouldn't usually allow, and also force
+people to learn some set of Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+ * Single line comments start with `#` or `//`
+
+ * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+   are not allowed. A multi-line comment (also known as a block comment)
+   terminates at the first `*/` found.
+
+ * Values are assigned with the syntax `key = value` (whitespace doesn't
+   matter). The value can be any primitive: a string, number, boolean,
+   object, or list.
+
+ * Strings are double-quoted and can contain any UTF-8 characters.
+   Example: `"Hello, World"`
+
+ * Multi-line strings start with `<<EOF` at the end of a line, and end
+   with just `EOF` on its own line.
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/appveyor.yml
+    echo %Path%
+
+    go version
+
+    go env
+
+    go get -t ./...
+
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 00000000..6e75ece8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,724 @@
+package hcl
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/parser"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// This is the tag to use with structures to have settings for HCL
+const tagName = "hcl"
+
+var (
+	// nodeType holds a reference to the type of ast.Node
+	nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+	root, err := parse(bs)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+	obj, err := Parse(in)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+	val := reflect.ValueOf(out)
+	if val.Kind() != reflect.Ptr {
+		return errors.New("result must be a pointer")
+	}
+
+	// If we have the file, we really decode the root node
+	if f, ok := n.(*ast.File); ok {
+		n = f.Node
+	}
+
+	var d decoder
+	return d.decode("root", n, val.Elem())
+}
+
+type decoder struct {
+	stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+	k := result
+
+	// If we have an interface with a valid value, we use that
+	// for the check.
+ if result.Kind() == reflect.Interface { + elem := result.Elem() + if elem.IsValid() { + k = elem + } + } + + // Push current onto stack unless it is an interface. + if k.Kind() != reflect.Interface { + d.stack = append(d.stack, k.Kind()) + + // Schedule a pop + defer func() { + d.stack = d.stack[:len(d.stack)-1] + }() + } + + switch k.Kind() { + case reflect.Bool: + return d.decodeBool(name, node, result) + case reflect.Float32, reflect.Float64: + return d.decodeFloat(name, node, result) + case reflect.Int, reflect.Int32, reflect.Int64: + return d.decodeInt(name, node, result) + case reflect.Interface: + // When we see an interface, we make our own thing + return d.decodeInterface(name, node, result) + case reflect.Map: + return d.decodeMap(name, node, result) + case reflect.Ptr: + return d.decodePtr(name, node, result) + case reflect.Slice: + return d.decodeSlice(name, node, result) + case reflect.String: + return d.decodeString(name, node, result) + case reflect.Struct: + return d.decodeStruct(name, node, result) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), + } + } +} + +func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.BOOL { + v, err := strconv.ParseBool(n.Token.Text) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.FLOAT { + v, err := strconv.ParseFloat(n.Token.Text, 64) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + v, err := strconv.ParseInt(n.Token.Text, 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + case token.STRING: + v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { + // When we see an ast.Node, we retain the value to enable deferred decoding. + // Very useful in situations where we want to preserve ast.Node information + // like Pos + if result.Type() == nodeType && result.CanSet() { + result.Set(reflect.ValueOf(node)) + return nil + } + + var set reflect.Value + redecode := true + + // For testing types, ObjectType should just be treated as a list. We + // set this to a temporary var because we want to pass in the real node. 
+ testNode := node + if ot, ok := node.(*ast.ObjectType); ok { + testNode = ot.List + } + + switch n := testNode.(type) { + case *ast.ObjectList: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) + set = result + } + case *ast.ObjectType: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 1) + set = result + } + case *ast.ListType: + var temp []interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 0) + set = result + case *ast.LiteralType: + switch n.Token.Type { + case token.BOOL: + var result bool + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.FLOAT: + var result float64 + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.NUMBER: + var result int + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.STRING, token.HEREDOC: + set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), + } + } + default: + return fmt.Errorf( + "%s: cannot decode into interface: %T", + name, node) + } + + // Set the result to what its supposed to be, then reset + // result so we don't reflect into this method anymore. + result.Set(set) + + if redecode { + // Revisit the node so that we can use the newly instantiated + // thing and populate it. 
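+		// e.g. having just allocated a map[string]interface{}, this second
+		// pass dispatches on reflect.Map and actually fills it in.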
+ if err := d.decode(name, node, result); err != nil { + return err + } + } + + return nil +} + +func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { + if item, ok := node.(*ast.ObjectItem); ok { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + n, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), + } + } + + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + + resultType := result.Type() + resultElemType := resultType.Elem() + resultKeyType := resultType.Key() + if resultKeyType.Kind() != reflect.String { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Make a map if it is nil + resultMap := result + if result.IsNil() { + resultMap = reflect.MakeMap( + reflect.MapOf(resultKeyType, resultElemType)) + } + + // Go through each element and decode it. + done := make(map[string]struct{}) + for _, item := range n.Items { + if item.Val == nil { + continue + } + + // github.com/hashicorp/terraform/issue/5740 + if len(item.Keys) == 0 { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Get the key we're dealing with, which is the first item + keyStr := item.Keys[0].Token.Value().(string) + + // If we've already processed this key, then ignore it + if _, ok := done[keyStr]; ok { + continue + } + + // Determine the value. If we have more than one key, then we + // get the objectlist of only these keys. + itemVal := item.Val + if len(item.Keys) > 1 { + itemVal = n.Filter(keyStr) + done[keyStr] = struct{}{} + } + + // Make the field name + fieldName := fmt.Sprintf("%s.%s", name, keyStr) + + // Get the key/value as reflection values + key := reflect.ValueOf(keyStr) + val := reflect.Indirect(reflect.New(resultElemType)) + + // If we have a pre-existing value in the map, use that + oldVal := resultMap.MapIndex(key) + if oldVal.IsValid() { + val.Set(oldVal) + } + + // Decode! + if err := d.decode(fieldName, itemVal, val); err != nil { + return err + } + + // Set the value on the map + resultMap.SetMapIndex(key, val) + } + + // Set the final map if we can + set.Set(resultMap) + return nil +} + +func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
+ resultType := result.Type() + resultElemType := resultType.Elem() + val := reflect.New(resultElemType) + if err := d.decode(name, node, reflect.Indirect(val)); err != nil { + return err + } + + result.Set(val) + return nil +} + +func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + // Create the slice if it isn't nil + resultType := result.Type() + resultElemType := resultType.Elem() + if result.IsNil() { + resultSliceType := reflect.SliceOf(resultElemType) + result = reflect.MakeSlice( + resultSliceType, 0, 0) + } + + // Figure out the items we'll be copying into the slice + var items []ast.Node + switch n := node.(type) { + case *ast.ObjectList: + items = make([]ast.Node, len(n.Items)) + for i, item := range n.Items { + items[i] = item + } + case *ast.ObjectType: + items = []ast.Node{n} + case *ast.ListType: + items = n.List + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("unknown slice type: %T", node), + } + } + + for i, item := range items { + fieldName := fmt.Sprintf("%s[%d]", name, i) + + // Decode + val := reflect.Indirect(reflect.New(resultElemType)) + + // if item is an object that was decoded from ambiguous JSON and + // flattened, make sure it's expanded if it needs to decode into a + // defined structure. + item := expandObject(item, val) + + if err := d.decode(fieldName, item, val); err != nil { + return err + } + + // Append it onto the slice + result = reflect.Append(result, val) + } + + set.Set(result) + return nil +} + +// expandObject detects if an ambiguous JSON object was flattened to a List which +// should be decoded into a struct, and expands the ast to properly deocode. +func expandObject(node ast.Node, result reflect.Value) ast.Node { + item, ok := node.(*ast.ObjectItem) + if !ok { + return node + } + + elemType := result.Type() + + // our target type must be a struct + switch elemType.Kind() { + case reflect.Ptr: + switch elemType.Elem().Kind() { + case reflect.Struct: + //OK + default: + return node + } + case reflect.Struct: + //OK + default: + return node + } + + // A list value will have a key and field name. If it had more fields, + // it wouldn't have been flattened. 
+ if len(item.Keys) != 2 { + return node + } + + keyToken := item.Keys[0].Token + item.Keys = item.Keys[1:] + + // we need to un-flatten the ast enough to decode + newNode := &ast.ObjectItem{ + Keys: []*ast.ObjectKey{ + &ast.ObjectKey{ + Token: keyToken, + }, + }, + Val: &ast.ObjectType{ + List: &ast.ObjectList{ + Items: []*ast.ObjectItem{item}, + }, + }, + } + + return newNode +} + +func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) + return nil + case token.STRING, token.HEREDOC: + result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type for string %T", name, node), + } +} + +func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { + var item *ast.ObjectItem + if it, ok := node.(*ast.ObjectItem); ok { + item = it + node = it.Val + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + // Handle the special case where the object itself is a literal. Previously + // the yacc parser would always ensure top-level elements were arrays. The new + // parser does not make the same guarantees, thus we need to convert any + // top-level literal elements into a list. + if _, ok := node.(*ast.LiteralType); ok && item != nil { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + list, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), + } + } + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = result + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") + + // Ignore fields with tag name "-" + if tagParts[0] == "-" { + continue + } + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unsupported type to struct: %s", + fieldType.Name, fieldKind), + } + } + + // We have an embedded field. We "squash" the fields down + // if specified in the tag. + squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append( + structs, result.FieldByName(fieldType.Name)) + continue + } + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + usedKeys := make(map[string]struct{}) + decodedFields := make([]string, 0, len(fields)) + decodedFieldsVal := make([]reflect.Value, 0) + unusedKeysVal := make([]reflect.Value, 0) + for fieldType, field := range fields { + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. 
+ if !field.CanSet() { + continue + } + + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(tagName) + tagParts := strings.SplitN(tagValue, ",", 2) + if len(tagParts) >= 2 { + switch tagParts[1] { + case "decodedFields": + decodedFieldsVal = append(decodedFieldsVal, field) + continue + case "key": + if item == nil { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: %s asked for 'key', impossible", + name, fieldName), + } + } + + field.SetString(item.Keys[0].Token.Value().(string)) + continue + case "unusedKeys": + unusedKeysVal = append(unusedKeysVal, field) + continue + } + } + + if tagParts[0] != "" { + fieldName = tagParts[0] + } + + // Determine the element we'll use to decode. If it is a single + // match (only object with the field), then we decode it exactly. + // If it is a prefix match, then we decode the matches. + filter := list.Filter(fieldName) + + prefixMatches := filter.Children() + matches := filter.Elem() + if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { + continue + } + + // Track the used key + usedKeys[fieldName] = struct{}{} + + // Create the field name and decode. We range over the elements + // because we actually want the value. + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + if len(prefixMatches.Items) > 0 { + if err := d.decode(fieldName, prefixMatches, field); err != nil { + return err + } + } + for _, match := range matches.Items { + var decodeNode ast.Node = match.Val + if ot, ok := decodeNode.(*ast.ObjectType); ok { + decodeNode = &ast.ObjectList{Items: ot.List.Items} + } + + if err := d.decode(fieldName, decodeNode, field); err != nil { + return err + } + } + + decodedFields = append(decodedFields, fieldType.Name) + } + + if len(decodedFieldsVal) > 0 { + // Sort it so that it is deterministic + sort.Strings(decodedFields) + + for _, v := range decodedFieldsVal { + v.Set(reflect.ValueOf(decodedFields)) + } + } + + return nil +} + +// findNodeType returns the type of ast.Node +func findNodeType() reflect.Type { + var nodeContainer struct { + Node ast.Node + } + value := reflect.ValueOf(nodeContainer).FieldByName("Node") + return value.Type() +} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go new file mode 100644 index 00000000..575a20b5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl.go @@ -0,0 +1,11 @@ +// Package hcl decodes HCL into usable Go structures. +// +// hcl input can come in either pure HCL format or JSON format. +// It can be parsed into an AST, and then decoded into a structure, +// or it can be decoded directly from a string into a structure. +// +// If you choose to parse HCL into a raw AST, the benefit is that you +// can write custom visitor implementations to implement custom +// semantic checks. By default, HCL does not perform any semantic +// checks. +package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go new file mode 100644 index 00000000..6e5ef654 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -0,0 +1,219 @@ +// Package ast declares the types used to represent syntax trees for HCL +// (HashiCorp Configuration Language) +package ast + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/token" +) + +// Node is an element in the abstract syntax tree. 
+type Node interface { + node() + Pos() token.Pos +} + +func (File) node() {} +func (ObjectList) node() {} +func (ObjectKey) node() {} +func (ObjectItem) node() {} +func (Comment) node() {} +func (CommentGroup) node() {} +func (ObjectType) node() {} +func (LiteralType) node() {} +func (ListType) node() {} + +// File represents a single HCL file +type File struct { + Node Node // usually a *ObjectList + Comments []*CommentGroup // list of all comments in the source +} + +func (f *File) Pos() token.Pos { + return f.Node.Pos() +} + +// ObjectList represents a list of ObjectItems. An HCL file itself is an +// ObjectList. +type ObjectList struct { + Items []*ObjectItem +} + +func (o *ObjectList) Add(item *ObjectItem) { + o.Items = append(o.Items, item) +} + +// Filter filters out the objects with the given key list as a prefix. +// +// The returned list of objects contain ObjectItems where the keys have +// this prefix already stripped off. This might result in objects with +// zero-length key lists if they have no children. +// +// If no matches are found, an empty ObjectList (non-nil) is returned. +func (o *ObjectList) Filter(keys ...string) *ObjectList { + var result ObjectList + for _, item := range o.Items { + // If there aren't enough keys, then ignore this + if len(item.Keys) < len(keys) { + continue + } + + match := true + for i, key := range item.Keys[:len(keys)] { + key := key.Token.Value().(string) + if key != keys[i] && !strings.EqualFold(key, keys[i]) { + match = false + break + } + } + if !match { + continue + } + + // Strip off the prefix from the children + newItem := *item + newItem.Keys = newItem.Keys[len(keys):] + result.Add(&newItem) + } + + return &result +} + +// Children returns further nested objects (key length > 0) within this +// ObjectList. This should be used with Filter to get at child items. +func (o *ObjectList) Children() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) > 0 { + result.Add(item) + } + } + + return &result +} + +// Elem returns items in the list that are direct element assignments +// (key length == 0). This should be used with Filter to get at elements. +func (o *ObjectList) Elem() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) == 0 { + result.Add(item) + } + } + + return &result +} + +func (o *ObjectList) Pos() token.Pos { + // always returns the uninitiliazed position + return o.Items[0].Pos() +} + +// ObjectItem represents a HCL Object Item. An item is represented with a key +// (or keys). It can be an assignment or an object (both normal and nested) +type ObjectItem struct { + // keys is only one length long if it's of type assignment. If it's a + // nested object it can be larger than one. In that case "assign" is + // invalid as there is no assignments for a nested object. + Keys []*ObjectKey + + // assign contains the position of "=", if any + Assign token.Pos + + // val is the item itself. It can be an object,list, number, bool or a + // string. If key length is larger than one, val can be only of type + // Object. + Val Node + + LeadComment *CommentGroup // associated lead comment + LineComment *CommentGroup // associated line comment +} + +func (o *ObjectItem) Pos() token.Pos { + // I'm not entirely sure what causes this, but removing this causes + // a test failure. We should investigate at some point. + if len(o.Keys) == 0 { + return token.Pos{} + } + + return o.Keys[0].Pos() +} + +// ObjectKeys are either an identifier or of type string. 
+type ObjectKey struct { + Token token.Token +} + +func (o *ObjectKey) Pos() token.Pos { + return o.Token.Pos +} + +// LiteralType represents a literal of basic type. Valid types are: +// token.NUMBER, token.FLOAT, token.BOOL and token.STRING +type LiteralType struct { + Token token.Token + + // comment types, only used when in a list + LeadComment *CommentGroup + LineComment *CommentGroup +} + +func (l *LiteralType) Pos() token.Pos { + return l.Token.Pos +} + +// ListStatement represents a HCL List type +type ListType struct { + Lbrack token.Pos // position of "[" + Rbrack token.Pos // position of "]" + List []Node // the elements in lexical order +} + +func (l *ListType) Pos() token.Pos { + return l.Lbrack +} + +func (l *ListType) Add(node Node) { + l.List = append(l.List, node) +} + +// ObjectType represents a HCL Object Type +type ObjectType struct { + Lbrace token.Pos // position of "{" + Rbrace token.Pos // position of "}" + List *ObjectList // the nodes in lexical order +} + +func (o *ObjectType) Pos() token.Pos { + return o.Lbrace +} + +// Comment node represents a single //, # style or /*- style commment +type Comment struct { + Start token.Pos // position of / or # + Text string +} + +func (c *Comment) Pos() token.Pos { + return c.Start +} + +// CommentGroup node represents a sequence of comments with no other tokens and +// no empty lines between. +type CommentGroup struct { + List []*Comment // len(List) > 0 +} + +func (c *CommentGroup) Pos() token.Pos { + return c.List[0].Pos() +} + +//------------------------------------------------------------------- +// GoStringer +//------------------------------------------------------------------- + +func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } +func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go new file mode 100644 index 00000000..ba07ad42 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go @@ -0,0 +1,52 @@ +package ast + +import "fmt" + +// WalkFunc describes a function to be called for each node during a Walk. The +// returned node can be used to rewrite the AST. Walking stops the returned +// bool is false. +type WalkFunc func(Node) (Node, bool) + +// Walk traverses an AST in depth-first order: It starts by calling fn(node); +// node must not be nil. If fn returns true, Walk invokes fn recursively for +// each of the non-nil children of node, followed by a call of fn(nil). The +// returned node of fn can be used to rewrite the passed node to fn. +func Walk(node Node, fn WalkFunc) Node { + rewritten, ok := fn(node) + if !ok { + return rewritten + } + + switch n := node.(type) { + case *File: + n.Node = Walk(n.Node, fn) + case *ObjectList: + for i, item := range n.Items { + n.Items[i] = Walk(item, fn).(*ObjectItem) + } + case *ObjectKey: + // nothing to do + case *ObjectItem: + for i, k := range n.Keys { + n.Keys[i] = Walk(k, fn).(*ObjectKey) + } + + if n.Val != nil { + n.Val = Walk(n.Val, fn) + } + case *LiteralType: + // nothing to do + case *ListType: + for i, l := range n.List { + n.List[i] = Walk(l, fn) + } + case *ObjectType: + n.List = Walk(n.List, fn).(*ObjectList) + default: + // should we panic here? 
+ fmt.Printf("unknown type: %T\n", n) + } + + fn(nil) + return rewritten +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go new file mode 100644 index 00000000..5c99381d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go @@ -0,0 +1,17 @@ +package parser + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/token" +) + +// PosError is a parse error that contains a position. +type PosError struct { + Pos token.Pos + Err error +} + +func (e *PosError) Error() string { + return fmt.Sprintf("At %s: %s", e.Pos, e.Err) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go new file mode 100644 index 00000000..b4881806 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -0,0 +1,520 @@ +// Package parser implements a parser for HCL (HashiCorp Configuration +// Language) +package parser + +import ( + "bytes" + "errors" + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/scanner" + "github.com/hashicorp/hcl/hcl/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + comments []*ast.CommentGroup + leadComment *ast.CommentGroup // last lead comment + lineComment *ast.CommentGroup // last line comment + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + // normalize all line endings + // since the scanner and output only work with "\n" line endings, we may + // end up with dangling "\r" characters in the parsed data. + src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) + + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = &PosError{Pos: pos, Err: errors.New(msg)} + } + + f.Node, err = p.objectList(false) + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + f.Comments = p.comments + return f, nil +} + +// objectList parses a list of items within an object (generally k/v pairs). +// The parameter" obj" tells this whether to we are within an object (braces: +// '{', '}') or just at the top level. If we're within an object, we end +// at an RBRACE. +func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + if obj { + tok := p.scan() + p.unscan() + if tok.Type == token.RBRACE { + break + } + } + + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // object lists can be optionally comma-delimited e.g. 
when a list of maps + // is being expressed, so a comma is allowed here - it's simply consumed + tok := p.scan() + if tok.Type != token.COMMA { + p.unscan() + } + } + return node, nil +} + +func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { + endline = p.tok.Pos.Line + + // count the endline if it's multiline comment, ie starting with /* + if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { + // don't use range here - no need to decode Unicode code points + for i := 0; i < len(p.tok.Text); i++ { + if p.tok.Text[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} + p.tok = p.sc.Scan() + return +} + +func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { + var list []*ast.Comment + endline = p.tok.Pos.Line + + for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + // add comment group to the comments list + comments = &ast.CommentGroup{List: list} + p.comments = append(p.comments, comments) + + return +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if len(keys) > 0 && err == errEofToken { + // We ignore eof token here since it is an error if we didn't + // receive a value (but we did receive a key) for the item. + err = nil + } + if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { + // This is a strange boolean statement, but what it means is: + // We have keys with no value, and we're likely in an object + // (since RBrace ends an object). For this, we set err to nil so + // we continue and get the error below of having the wrong value + // type. + err = nil + + // Reset the token type so we don't think it completed fine. See + // objectType which uses p.tok.Type to check if we're done with + // the object. + p.tok.Type = token.EOF + } + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + if p.leadComment != nil { + o.LeadComment = p.leadComment + p.leadComment = nil + } + + switch p.tok.Type { + case token.ASSIGN: + o.Assign = p.tok.Pos + o.Val, err = p.object() + if err != nil { + return nil, err + } + case token.LBRACE: + o.Val, err = p.objectType() + if err != nil { + return nil, err + } + default: + keyStr := make([]string, 0, len(keys)) + for _, k := range keys { + keyStr = append(keyStr, k.Token.Text) + } + + return nil, fmt.Errorf( + "key '%s' expected start of object ('{') or assignment ('=')", + strings.Join(keyStr, " ")) + } + + // do a look-ahead for line comment + p.scan() + if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { + o.LineComment = p.lineComment + p.lineComment = nil + } + p.unscan() + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + // It is very important to also return the keys here as well as + // the error. This is because we need to be able to tell if we + // did parse keys prior to finding the EOF, or if we just found + // a bare EOF. + return keys, errEofToken + case token.ASSIGN: + // assignment or object only, but not nested objects. 
this is not + // allowed: `foo bar = {}` + if keyCount > 1 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), + } + } + + if keyCount == 0 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: errors.New("no object keys found!"), + } + } + + return keys, nil + case token.LBRACE: + var err error + + // If we have no keys, then it is a syntax error. i.e. {{}} is not + // allowed. + if len(keys) == 0 { + err = &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), + } + } + + // object + return keys, err + case token.IDENT, token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{Token: p.tok}) + case token.ILLEGAL: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("illegal character"), + } + default: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), + } + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (ast.Node, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.COMMENT: + // implement comment + case token.EOF: + return nil, errEofToken + } + + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("Unknown token: %+v", tok), + } +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{ + Lbrace: p.tok.Pos, + } + + l, err := p.objectList(true) + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. 
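+	// (e.g. a bad key sequence like `a b c = 1` aborts here; an error that
+	// stops exactly on RBRACE means the object's items themselves parsed)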
+ if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + // No error, scan and expect the ending to be a brace + if tok := p.scan(); tok.Type != token.RBRACE { + return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type) + } + + o.List = l + o.Rbrace = p.tok.Pos // advanced via parseObjectList + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{ + Lbrack: p.tok.Pos, + } + + needComma := false + for { + tok := p.scan() + if needComma { + switch tok.Type { + case token.COMMA, token.RBRACK: + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error parsing list, expected comma or list end, got: %s", + tok.Type), + } + } + } + switch tok.Type { + case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + node, err := p.literalType() + if err != nil { + return nil, err + } + + // If there is a lead comment, apply it + if p.leadComment != nil { + node.LeadComment = p.leadComment + p.leadComment = nil + } + + l.Add(node) + needComma = true + case token.COMMA: + // get next list item or we are at the end + // do a look-ahead for line comment + p.scan() + if p.lineComment != nil && len(l.List) > 0 { + lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) + if ok { + lit.LineComment = p.lineComment + l.List[len(l.List)-1] = lit + p.lineComment = nil + } + } + p.unscan() + + needComma = false + continue + case token.LBRACE: + // Looks like a nested object, so parse it out + node, err := p.objectType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse object within list: %s", err), + } + } + l.Add(node) + needComma = true + case token.LBRACK: + node, err := p.listType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse list within list: %s", err), + } + } + l.Add(node) + case token.RBRACK: + // finished + l.Rbrack = p.tok.Pos + return l, nil + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), + } + } + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok, + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. In the process, it collects any +// comment groups encountered, and remembers the last lead and line comments. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + // Otherwise read the next token from the scanner and Save it to the buffer + // in case we unscan later. + prev := p.tok + p.tok = p.sc.Scan() + + if p.tok.Type == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", + // p.tok.Pos.Line, prev.Pos.Line, endline) + if p.tok.Pos.Line == prev.Pos.Line { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. 
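+			// e.g. for `foo = 1 // note`, the comment shares foo's line and
+			// is recorded as its line comment rather than as a lead comment.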
+ comment, endline = p.consumeCommentGroup(0) + if p.tok.Pos.Line != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + p.lineComment = comment + } + } + + // consume successor comments, if any + endline = -1 + for p.tok.Type == token.COMMENT { + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { + switch p.tok.Type { + case token.RBRACE, token.RBRACK: + // Do not count for these cases + default: + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + p.leadComment = comment + } + } + + } + + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go new file mode 100644 index 00000000..69662367 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -0,0 +1,651 @@ +// Package scanner implements a scanner for HCL (HashiCorp Configuration +// Language) source text. +package scanner + +import ( + "bytes" + "fmt" + "os" + "regexp" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/hcl/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. 
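+	// Typical usage (sketch):
+	//
+	//	s := New([]byte(`foo = "bar"`))
+	//	for tok := s.Scan(); tok.Type != token.EOF; tok = s.Scan() {
+	//		// inspect tok.Type and tok.Text
+	//	}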
+ b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // If we see a null character with data left, then that is an error + if ch == '\x00' && s.buf.Len() > 0 { + s.err("unexpected null character (0x00)") + return eof + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. +func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + tok = token.IDENT + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '#', '/': + tok = token.COMMENT + s.scanComment(ch) + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '<': + tok = token.HEREDOC + s.scanHeredoc() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case '=': + tok = token.ASSIGN + case '+': + tok = token.ADD + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + tok = token.SUB + } + default: + s.err("illegal char") + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = 
string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +func (s *Scanner) scanComment(ch rune) { + // single line comments + if ch == '#' || (ch == '/' && s.peek() != '*') { + if ch == '/' && s.peek() != '/' { + s.err("expected '/' for comment") + return + } + + ch = s.next() + for ch != '\n' && ch >= 0 && ch != eof { + ch = s.next() + } + if ch != eof && ch >= 0 { + s.unread() + } + return + } + + // be sure we get the character after /* This allows us to find comment's + // that are not erminated + if ch == '/' { + s.next() + ch = s.next() // read character after "/*" + } + + // look for /* - style comments + for { + if ch < 0 || ch == eof { + s.err("comment not terminated") + break + } + + ch0 := ch + ch = s.next() + if ch0 == '*' && ch == '/' { + break + } + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + if ch == '0' { + // check for hexadecimal, octal or float + ch = s.next() + if ch == 'x' || ch == 'X' { + // hexadecimal + ch = s.next() + found := false + for isHexadecimal(ch) { + ch = s.next() + found = true + } + + if !found { + s.err("illegal hexadecimal number") + } + + if ch != eof { + s.unread() + } + + return token.NUMBER + } + + // now it's either something like: 0421(octal) or 0.1231(float) + illegalOctal := false + for isDecimal(ch) { + ch = s.next() + if ch == '8' || ch == '9' { + // this is just a possibility. For example 0159 is illegal, but + // 0159.23 is valid. So we mark a possible illegal octal. If + // the next character is not a period, we'll print the error. + illegalOctal = true + } + } + + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if illegalOctal { + s.err("illegal octal number") + } + + if ch != eof { + s.unread() + } + return token.NUMBER + } + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + return token.NUMBER +} + +// scanMantissa scans the mantissa begining from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. 
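+// e.g. for "1e10" or "3.2E-4" it consumes the optional sign and the
+// exponent digits.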
+func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanHeredoc scans a heredoc string +func (s *Scanner) scanHeredoc() { + // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { + break + } + + // Not an anchor match, record the start of a new line + lineStart = s.srcPos.Offset + } + + if ch == eof { + s.err("heredoc not terminated") + return + } + } + + return +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' && braces == 0 { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + start := n + for n > 0 && digitVal(ch) < base { + ch = s.next() + if ch == eof { + // If we see an EOF, we halt any more scanning of digits + // immediately. + break + } + + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + if n != start { + // we scanned all digits, put the last non digit char back, + // only if we read anything at all + s.unread() + } + + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. 
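+// It is used by err below, so scanner diagnostics carry the position of
+// the most recently read character.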
+func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isDigit returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isDecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go new file mode 100644 index 00000000..5f981eaa --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -0,0 +1,241 @@ +package strconv + +import ( + "errors" + "unicode/utf8" +) + +// ErrSyntax indicates that a value does not have the right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' { + return "", ErrSyntax + } + if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. 
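+	// e.g. a plain `"hello"` has no escapes, quotes, or '$', so the body
+	// can be returned without copying.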
+ if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { + switch quote { + case '"': + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + // If we're starting a '${}' then let it through un-unquoted. + // Specifically: we don't unquote any characters within the `${}` + // section. + if s[0] == '$' && len(s) > 1 && s[1] == '{' { + buf = append(buf, '$', '{') + s = s[2:] + + // Continue reading until we find the closing brace, copying as-is + braces := 1 + for len(s) > 0 && braces > 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return "", ErrSyntax + } + + s = s[size:] + + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + + switch r { + case '{': + braces++ + case '}': + braces-- + } + } + if braces != 0 { + return "", ErrSyntax + } + if len(s) == 0 { + // If there's no string left, we're done! + break + } else { + // If there's more left, we need to pop back up to the top of the loop + // in case there's another interpolation in this string. + continue + } + } + + if s[0] == '\n' { + return "", ErrSyntax + } + + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", ErrSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} + +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"'): + err = ErrSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = ErrSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = ErrSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = ErrSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = ErrSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = ErrSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = ErrSyntax + return 
+ } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = ErrSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + if c != quote { + err = ErrSyntax + return + } + value = rune(c) + default: + err = ErrSyntax + return + } + tail = s + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 00000000..59c1bb72 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. +func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 00000000..e37c0664 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go @@ -0,0 +1,219 @@ +// Package token defines constants representing the lexical tokens for HCL +// (HashiCorp Configuration Language) +package token + +import ( + "fmt" + "strconv" + "strings" + + hclstrconv "github.com/hashicorp/hcl/hcl/strconv" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string + JSON bool +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + COMMENT + + identifier_beg + IDENT // literals + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + HEREDOC // < 0 { + // Pop the current item + n := len(frontier) + item := frontier[n-1] + frontier = frontier[:n-1] + + switch v := item.Val.(type) { + case *ast.ObjectType: + items, frontier = flattenObjectType(v, item, items, frontier) + case *ast.ListType: + items, frontier = flattenListType(v, item, items, frontier) + default: + items = append(items, item) + } + } + + // Reverse the list since the frontier model runs things backwards + for i := len(items)/2 - 1; i >= 0; i-- { + opp := len(items) - 1 - i + items[i], items[opp] = items[opp], items[i] + } + + // Done! 
Set the original items + list.Items = items + return n, true + }) +} + +func flattenListType( + ot *ast.ListType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list is empty, keep the original list + if len(ot.List) == 0 { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List { + if _, ok := subitem.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, elem := range ot.List { + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: item.Keys, + Assign: item.Assign, + Val: elem, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} + +func flattenObjectType( + ot *ast.ObjectType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list has no items we do not have to flatten anything + if ot.List.Items == nil { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List.Items { + if _, ok := subitem.Val.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, subitem := range ot.List.Items { + // Copy the new key + keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) + copy(keys, item.Keys) + copy(keys[len(item.Keys):], subitem.Keys) + + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: keys, + Assign: item.Assign, + Val: subitem.Val, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 00000000..125a5f07 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -0,0 +1,313 @@ +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hcltoken "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/hcl/json/scanner" + "github.com/hashicorp/hcl/json/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. 
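+// For example (sketch):
+//
+//	f, err := Parse([]byte(`{"foo": "bar"}`))
+//
+// yields an *ast.File whose Node is the flattened ObjectList.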
+func (p *Parser) Parse() (*ast.File, error) {
+	f := &ast.File{}
+	var err, scerr error
+	p.sc.Error = func(pos token.Pos, msg string) {
+		scerr = fmt.Errorf("%s: %s", pos, msg)
+	}
+
+	// The root must be an object in JSON
+	object, err := p.object()
+	if scerr != nil {
+		return nil, scerr
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// We make our final node an object list so it is more HCL compatible
+	f.Node = object.List
+
+	// Flatten it, which finds patterns and turns them into more HCL-like
+	// AST trees.
+	flattenObjects(f.Node)
+
+	return f, nil
+}
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+	defer un(trace(p, "ParseObjectList"))
+	node := &ast.ObjectList{}
+
+	for {
+		n, err := p.objectItem()
+		if err == errEofToken {
+			break // we are finished
+		}
+
+		// We don't return a nil node, because we might want to use the
+		// already-collected items.
+		if err != nil {
+			return node, err
+		}
+
+		node.Add(n)
+
+		// Check for a follow-up comma. If it isn't a comma, then we're done
+		if tok := p.scan(); tok.Type != token.COMMA {
+			break
+		}
+	}
+
+	return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+	defer un(trace(p, "ParseObjectItem"))
+
+	keys, err := p.objectKey()
+	if err != nil {
+		return nil, err
+	}
+
+	o := &ast.ObjectItem{
+		Keys: keys,
+	}
+
+	switch p.tok.Type {
+	case token.COLON:
+		pos := p.tok.Pos
+		o.Assign = hcltoken.Pos{
+			Filename: pos.Filename,
+			Offset:   pos.Offset,
+			Line:     pos.Line,
+			Column:   pos.Column,
+		}
+
+		o.Val, err = p.objectValue()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return o, nil
+}
+
+// objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+	keyCount := 0
+	keys := make([]*ast.ObjectKey, 0)
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.EOF:
+			return nil, errEofToken
+		case token.STRING:
+			keyCount++
+			keys = append(keys, &ast.ObjectKey{
+				Token: p.tok.HCLToken(),
+			})
+		case token.COLON:
+			// If we have a zero key count it means that we never got
+			// an object key, i.e. `{ :`. This is a syntax error.
+			if keyCount == 0 {
+				return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+			}
+
+			// Done
+			return keys, nil
+		case token.ILLEGAL:
+			return nil, errors.New("illegal")
+		default:
+			return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+		}
+	}
+}
+
+// objectValue parses any type of object value, such as number, bool, string,
+// object, or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+	defer un(trace(p, "ParseObjectValue"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+		return p.literalType()
+	case token.LBRACE:
+		return p.objectType()
+	case token.LBRACK:
+		return p.listType()
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+// object parses the root object, which must be a JSON object.
+func (p *Parser) object() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseType"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.LBRACE:
+		return p.objectType()
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseObjectType"))
+
+	// we assume that the currently scanned token is an LBRACE
+	o := &ast.ObjectType{}
+
+	l, err := p.objectList()
+
+	// if we hit RBRACE, we are good to go (means we parsed all items); if it's
+	// not an RBRACE, it's a syntax error and we just return it.
+	if err != nil && p.tok.Type != token.RBRACE {
+		return nil, err
+	}
+
+	o.List = l
+	return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+	defer un(trace(p, "ParseListType"))
+
+	// we assume that the currently scanned token is an LBRACK
+	l := &ast.ListType{}
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.NUMBER, token.FLOAT, token.STRING:
+			node, err := p.literalType()
+			if err != nil {
+				return nil, err
+			}
+
+			l.Add(node)
+		case token.COMMA:
+			continue
+		case token.LBRACE:
+			node, err := p.objectType()
+			if err != nil {
+				return nil, err
+			}
+
+			l.Add(node)
+		case token.BOOL:
+			// TODO(arslan) should we support? not supported by HCL yet
+		case token.LBRACK:
+			// TODO(arslan) should we support nested lists? Even though it's
+			// written in the README of HCL, it's not a part of the grammar
+			// (not defined in parse.y)
+		case token.RBRACK:
+			// finished
+			return l, nil
+		default:
+			return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
+		}
+
+	}
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+	defer un(trace(p, "ParseLiteral"))
+
+	return &ast.LiteralType{
+		Token: p.tok.HCLToken(),
+	}, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead.
+func (p *Parser) scan() token.Token {
+	// If we have a token on the buffer, then return it.
+	if p.n != 0 {
+		p.n = 0
+		return p.tok
+	}
+
+	p.tok = p.sc.Scan()
+	return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+	p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+	if !p.enableTrace {
+		return
+	}
+
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+	i := 2 * p.indent
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+	p.printTrace(msg, "(")
+	p.indent++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+	p.indent--
+	p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
new file mode 100644
index 00000000..dd5c72bb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
@@ -0,0 +1,451 @@
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/hashicorp/hcl/json/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+	buf *bytes.Buffer // Source buffer for advancing and scanning
+	src []byte        // Source buffer for immutable access
+
+	// Source Position
+	srcPos  token.Pos // current position
+	prevPos token.Pos // previous position, used for peek() method
+
+	lastCharLen int // length of last character in bytes
+	lastLineLen int // length of last line in characters (for correct column reporting)
+
+	tokStart int // token text start position
+	tokEnd   int // token text end position
+
+	// Error is called for each error encountered. If no Error
+	// function is set, the error is reported to os.Stderr.
+	Error func(pos token.Pos, msg string)
+
+	// ErrorCount is incremented by one for each error encountered.
+	ErrorCount int
+
+	// tokPos is the start position of the most recently scanned token; set by
+	// Scan. The Filename field is always left untouched by the Scanner. If
+	// an error is reported (via Error) and Position is invalid, the scanner is
+	// not inside a token.
+	tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+	// even though we accept a src, we read from an io.Reader-compatible type
+	// (*bytes.Buffer). So in the future we might easily change it to a
+	// streaming read.
+	b := bytes.NewBuffer(src)
+	s := &Scanner{
+		buf: b,
+		src: src,
+	}
+
+	// srcPos.Line always starts at 1
+	s.srcPos.Line = 1
+	return s
+}
+
+// next reads the next rune from the buffered reader. It returns rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+	ch, size, err := s.buf.ReadRune()
+	if err != nil {
+		// advance for error reporting
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		return eof
+	}
+
+	if ch == utf8.RuneError && size == 1 {
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		s.err("illegal UTF-8 encoding")
+		return ch
+	}
+
+	// remember last position
+	s.prevPos = s.srcPos
+
+	s.srcPos.Column++
+	s.lastCharLen = size
+	s.srcPos.Offset += size
+
+	if ch == '\n' {
+		s.srcPos.Line++
+		s.lastLineLen = s.srcPos.Column
+		s.srcPos.Column = 0
+	}
+
+	// debug
+	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+	return ch
+}
+
+// unread unreads the previously read rune and updates the source position
+func (s *Scanner) unread() {
+	if err := s.buf.UnreadRune(); err != nil {
+		panic(err) // this is a user fault, we should catch it
+	}
+	s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+	peek, _, err := s.buf.ReadRune()
+	if err != nil {
+		return eof
+	}
+
+	s.buf.UnreadRune()
+	return peek
+}
+
+// Scan scans the next token and returns the token.
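+//
+// Illustrative usage sketch (not part of the upstream source): a client
+// package would construct a Scanner with New and loop until token.EOF:
+//
+//	s := scanner.New([]byte(`{"port": 8080}`))
+//	for {
+//		tok := s.Scan()
+//		if tok.Type == token.EOF {
+//			break
+//		}
+//		fmt.Printf("%s %s %q\n", tok.Pos, tok.Type, tok.Text)
+//	}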
+func (s *Scanner) Scan() token.Token {
+	ch := s.next()
+
+	// skip white space
+	for isWhitespace(ch) {
+		ch = s.next()
+	}
+
+	var tok token.Type
+
+	// token text markings
+	s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position: the initial next() moved the offset by one rune (by its
+	// size in bytes, actually), though we are interested in the starting point
+	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+	if s.srcPos.Column > 0 {
+		// common case: last character was not a '\n'
+		s.tokPos.Line = s.srcPos.Line
+		s.tokPos.Column = s.srcPos.Column
+	} else {
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		s.tokPos.Line = s.srcPos.Line - 1
+		s.tokPos.Column = s.lastLineLen
+	}
+
+	switch {
+	case isLetter(ch):
+		lit := s.scanIdentifier()
+		if lit == "true" || lit == "false" {
+			tok = token.BOOL
+		} else if lit == "null" {
+			tok = token.NULL
+		} else {
+			s.err("illegal char")
+		}
+	case isDecimal(ch):
+		tok = s.scanNumber(ch)
+	default:
+		switch ch {
+		case eof:
+			tok = token.EOF
+		case '"':
+			tok = token.STRING
+			s.scanString()
+		case '.':
+			tok = token.PERIOD
+			ch = s.peek()
+			if isDecimal(ch) {
+				tok = token.FLOAT
+				ch = s.scanMantissa(ch)
+				ch = s.scanExponent(ch)
+			}
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			tok = token.RBRACK
+		case '{':
+			tok = token.LBRACE
+		case '}':
+			tok = token.RBRACE
+		case ',':
+			tok = token.COMMA
+		case ':':
+			tok = token.COLON
+		case '-':
+			if isDecimal(s.peek()) {
+				ch := s.next()
+				tok = s.scanNumber(ch)
+			} else {
+				s.err("illegal char")
+			}
+		default:
+			s.err("illegal char: " + string(ch))
+		}
+	}
+
+	// finish token ending
+	s.tokEnd = s.srcPos.Offset
+
+	// create token literal
+	var tokenText string
+	if s.tokStart >= 0 {
+		tokenText = string(s.src[s.tokStart:s.tokEnd])
+	}
+	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+	return token.Token{
+		Type: tok,
+		Pos:  s.tokPos,
+		Text: tokenText,
+	}
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+	zero := ch == '0'
+	pos := s.srcPos
+
+	s.scanMantissa(ch)
+	ch = s.next() // seek forward
+	if ch == 'e' || ch == 'E' {
+		ch = s.scanExponent(ch)
+		return token.FLOAT
+	}
+
+	if ch == '.' {
+		ch = s.scanFraction(ch)
+		if ch == 'e' || ch == 'E' {
+			ch = s.next()
+			ch = s.scanExponent(ch)
+		}
+		return token.FLOAT
+	}
+
+	if ch != eof {
+		s.unread()
+	}
+
+	// If we have a larger number and this is zero, error
+	if zero && pos != s.srcPos {
+		s.err("numbers cannot start with 0")
+	}
+
+	return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+	if ch == '.' {
+		ch = s.peek() // we peek just to see if we can move forward
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+	if ch == 'e' || ch == 'E' {
+		ch = s.next()
+		if ch == '-' || ch == '+' {
+			ch = s.next()
+		}
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+	braces := 0
+	for {
+		// '"' opening already consumed
+		// read character after quote
+		ch := s.next()
+
+		if ch == '\n' || ch < 0 || ch == eof {
+			s.err("literal not terminated")
+			return
+		}
+
+		if ch == '"' {
+			break
+		}
+
+		// If we're going into a ${} then we can ignore quotes for a while
+		if braces == 0 && ch == '$' && s.peek() == '{' {
+			braces++
+			s.next()
+		} else if braces > 0 && ch == '{' {
+			braces++
+		}
+		if braces > 0 && ch == '}' {
+			braces--
+		}
+
+		if ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+	// http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\\'
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+		// nothing to do
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// octal notation
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		// hexadecimal notation
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.err("illegal char escape")
+	}
+	return ch
+}
+
+// scanDigits scans a rune with the given base up to n times. For example, the
+// octal escape \123 would result in a call scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	// we scanned all digits, put the last non-digit char back
+	s.unread()
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got the identifier, put back the latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err prints the error of any scanning to the s.Error function. If the function is
+// not defined, it prints the error to os.Stderr by default.
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 00000000..59c1bb72
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A position is valid if the line number is > 0.
+type Pos struct {
+	Filename string // filename, if any
+	Offset   int    // offset, starting at 0
+	Line     int    // line number, starting at 1
+	Column   int    // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+//	file:line:column    valid position with file name
+//	line:column         valid position without file name
+//	file                invalid position with file name
+//	-                   invalid position without file name
+func (p Pos) String() string {
+	s := p.Filename
+	if p.IsValid() {
+		if s != "" {
+			s += ":"
+		}
+		s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+	}
+	if s == "" {
+		s = "-"
+	}
+	return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+	return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
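+//
+// For example (illustrative, not from the upstream source):
+//
+//	a := Pos{Line: 1, Column: 2, Offset: 1}
+//	b := Pos{Line: 2, Column: 1, Offset: 10}
+//	a.Before(b) // true: b is further into the source
+//	b.After(a)  // true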
+func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 00000000..95a0c3ee --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token.go @@ -0,0 +1,118 @@ +package token + +import ( + "fmt" + "strconv" + + hcltoken "github.com/hashicorp/hcl/hcl/token" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + + identifier_beg + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + NULL // null + literal_end + identifier_end + + operator_beg + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + COLON // : + + RBRACK // ] + RBRACE // } + + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + + NUMBER: "NUMBER", + FLOAT: "FLOAT", + BOOL: "BOOL", + STRING: "STRING", + NULL: "NULL", + + LBRACK: "LBRACK", + LBRACE: "LBRACE", + COMMA: "COMMA", + PERIOD: "PERIOD", + COLON: "COLON", + + RBRACK: "RBRACK", + RBRACE: "RBRACE", +} + +// String returns the string corresponding to the token tok. +func (t Type) String() string { + s := "" + if 0 <= t && t < Type(len(tokens)) { + s = tokens[t] + } + if s == "" { + s = "token(" + strconv.Itoa(int(t)) + ")" + } + return s +} + +// IsIdentifier returns true for tokens corresponding to identifiers and basic +// type literals; it returns false otherwise. +func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } + +// IsLiteral returns true for tokens corresponding to basic type literals; it +// returns false otherwise. +func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } + +// String returns the token's literal text. Note that this is only +// applicable for certain token types, such as token.IDENT, +// token.STRING, etc.. +func (t Token) String() string { + return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) +} + +// HCLToken converts this token to an HCL token. +// +// The token type must be a literal type or this will panic. +func (t Token) HCLToken() hcltoken.Token { + switch t.Type { + case BOOL: + return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} + case FLOAT: + return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} + case NULL: + return hcltoken.Token{Type: hcltoken.STRING, Text: ""} + case NUMBER: + return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} + case STRING: + return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} + default: + panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) + } +} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go new file mode 100644 index 00000000..d9993c29 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/lex.go @@ -0,0 +1,38 @@ +package hcl + +import ( + "unicode" + "unicode/utf8" +) + +type lexModeValue byte + +const ( + lexModeUnknown lexModeValue = iota + lexModeHcl + lexModeJson +) + +// lexMode returns whether we're going to be parsing in JSON +// mode or HCL mode. 
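+//
+// For example (illustrative, not from the upstream source):
+//
+//	lexMode([]byte(`  {"foo": "bar"}`)) // lexModeJson: first non-space rune is '{'
+//	lexMode([]byte(`foo = "bar"`))      // lexModeHcl: any other first rune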
+func lexMode(v []byte) lexModeValue { + var ( + r rune + w int + offset int + ) + + for { + r, w = utf8.DecodeRune(v[offset:]) + offset += w + if unicode.IsSpace(r) { + continue + } + if r == '{' { + return lexModeJson + } + break + } + + return lexModeHcl +} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go new file mode 100644 index 00000000..1fca53c4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/parse.go @@ -0,0 +1,39 @@ +package hcl + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hclParser "github.com/hashicorp/hcl/hcl/parser" + jsonParser "github.com/hashicorp/hcl/json/parser" +) + +// ParseBytes accepts as input byte slice and returns ast tree. +// +// Input can be either JSON or HCL +func ParseBytes(in []byte) (*ast.File, error) { + return parse(in) +} + +// ParseString accepts input as a string and returns ast tree. +func ParseString(input string) (*ast.File, error) { + return parse([]byte(input)) +} + +func parse(in []byte) (*ast.File, error) { + switch lexMode(in) { + case lexModeHcl: + return hclParser.Parse(in) + case lexModeJson: + return jsonParser.Parse(in) + } + + return nil, fmt.Errorf("unknown config format") +} + +// Parse parses the given input and returns the root object. +// +// The input format can be either HCL or JSON. +func Parse(input string) (*ast.File, error) { + return parse([]byte(input)) +} diff --git a/vendor/github.com/hashicorp/hil/LICENSE b/vendor/github.com/hashicorp/hil/LICENSE new file mode 100644 index 00000000..82b4de97 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/hil/README.md b/vendor/github.com/hashicorp/hil/README.md
new file mode 100644
index 00000000..186ed251
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/README.md
@@ -0,0 +1,102 @@
+# HIL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://travis-ci.org/hashicorp/hil.svg?branch=master)](https://travis-ci.org/hashicorp/hil)
+
+HIL (HashiCorp Interpolation Language) is a lightweight embedded language used
+primarily for configuration interpolation. The goal of HIL is to make a simple
+language for interpolations in the various configurations of HashiCorp tools.
+
+HIL is built to interpolate any string, but is in use by HashiCorp primarily
+with [HCL](https://github.com/hashicorp/hcl). HCL is _not required_ in any
+way for use with HIL.
+
+HIL isn't meant to be a general-purpose language. It was built for basic
+configuration interpolations. Therefore, you can't currently write functions,
+have conditionals, set intermediary variables, etc. within HIL itself. It is
+possible some of these may be added later, but the right use case must exist.
+
+## Why?
+
+Many of our tools have support for something similar to templates, but
+within the configuration itself. The most prominent requirement was in
+[Terraform](https://github.com/hashicorp/terraform) where we wanted the
+configuration to be able to reference values from elsewhere in the
+configuration. Example:
+
+    foo = "hi ${var.world}"
+
+We originally used a full templating language for this, but found it
+was too heavyweight. Additionally, many full languages required bindings
+to C (and thus the usage of cgo) which we try to avoid to make cross-compilation
+easier. We then moved to very basic regular-expression-based
+string replacement, but found the need for basic arithmetic and function
+calls resulting in overly complex regular expressions.
+
+Ultimately, we wrote our own mini-language within Terraform itself. As
+we built other projects such as [Nomad](https://nomadproject.io) and
+[Otto](https://ottoproject.io), the need for basic interpolations arose
+again.
+
+Thus HIL was born. It is extracted from Terraform, cleaned up, and
+better tested for general-purpose use.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is given here.
+
+Code begins within `${` and `}`. Outside of this, text is treated
+literally. For example, `foo` is a valid HIL program that is just the
+string "foo", but `foo ${bar}` is an HIL program that is the string "foo "
+concatenated with the value of `bar`. For the remainder of the syntax
+docs, we'll assume you're within `${}`.
+
+  * Identifiers are any text in the format of `[a-zA-Z0-9-.]`. Example
+    identifiers: `foo`, `var.foo`, `foo-bar`.
+
+  * Strings are double quoted and can contain any UTF-8 characters.
+    Example: `"Hello, World"`
+
+  * Numbers are assumed to be base 10. If you prefix a number with 0x,
+    it is treated as a hexadecimal. If it is prefixed with 0, it is
+    treated as an octal. Numbers can be in scientific notation: "1e10".
+
+  * Unary `-` can be used for negative numbers.
+    Example: `-10` or `-0.2`
+
+  * Boolean values: `true`, `false`
+
+  * The following arithmetic operations are allowed: +, -, *, /, %.
+
+  * Function calls are in the form of `name(arg1, arg2, ...)`. Example:
+    `add(1, 5)`. Arguments can be any valid HIL expression, example:
+    `add(1, var.foo)` or even nested function calls:
+    `add(1, get("some value"))`.
+
+  * Within strings, further interpolations can be opened with `${}`.
+    Example: `"Hello ${nested}"`. A full example including the
+    original `${}` (remember, this list assumes we're inside of one
+    already) could be: `foo ${func("hello ${var.foo}")}`.
+
+## Language Changes
+
+We've used this mini-language in Terraform for years. For backwards compatibility
+reasons, we're unlikely to make an incompatible change to the language but
+we're not currently making that promise, either.
+
+The internal API of this project may very well change as we evolve it
+to work with more of our projects. We recommend using some sort of dependency
+management solution with this package.
+
+## Future Changes
+
+The following changes are already planned to be made at some point:
+
+  * Richer types: lists, maps, etc.
+
+  * Convert to a more standard Go parser structure similar to HCL. This
+    will improve our error messaging as well as allow us to have automatic
+    formatting.
+
+  * Allow interpolations to result in more types than just a string. While
+    within the interpolation basic types are honored, the result is always
+    a string.
diff --git a/vendor/github.com/hashicorp/hil/appveyor.yml b/vendor/github.com/hashicorp/hil/appveyor.yml
new file mode 100644
index 00000000..feaf7a34
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/appveyor.yml
@@ -0,0 +1,18 @@
+version: "build-{branch}-{build}"
+image: Visual Studio 2015
+clone_folder: c:\gopath\src\github.com\hashicorp\hil
+environment:
+  GOPATH: c:\gopath
+init:
+  - git config --global core.autocrlf true
+install:
+- cmd: >-
+    echo %Path%
+
+    go version
+
+    go env
+
+    go get -d -v -t ./...
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic.go b/vendor/github.com/hashicorp/hil/ast/arithmetic.go
new file mode 100644
index 00000000..94dc24f8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/arithmetic.go
@@ -0,0 +1,43 @@
+package ast
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Arithmetic represents a node where the result is arithmetic of
+// two or more operands in the order given.
+type Arithmetic struct {
+	Op    ArithmeticOp
+	Exprs []Node
+	Posx  Pos
+}
+
+func (n *Arithmetic) Accept(v Visitor) Node {
+	for i, expr := range n.Exprs {
+		n.Exprs[i] = expr.Accept(v)
+	}
+
+	return v(n)
+}
+
+func (n *Arithmetic) Pos() Pos {
+	return n.Posx
+}
+
+func (n *Arithmetic) GoString() string {
+	return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *Arithmetic) String() string {
+	var b bytes.Buffer
+	for _, expr := range n.Exprs {
+		b.WriteString(fmt.Sprintf("%s", expr))
+	}
+
+	return b.String()
+}
+
+func (n *Arithmetic) Type(Scope) (Type, error) {
+	return TypeInt, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
new file mode 100644
index 00000000..18880c60
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
@@ -0,0 +1,24 @@
+package ast
+
+// ArithmeticOp is the operation to use for the math.
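+//
+// Illustrative sketch (not from the upstream source): an interpolation such
+// as ${1 + 5} would presumably be represented by an *Arithmetic node, which
+// could be built by hand as:
+//
+//	n := &Arithmetic{
+//		Op: ArithmeticOpAdd,
+//		Exprs: []Node{
+//			&LiteralNode{Value: 1, Typex: TypeInt, Posx: InitPos},
+//			&LiteralNode{Value: 5, Typex: TypeInt, Posx: InitPos},
+//		},
+//		Posx: InitPos,
+//	}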
+type ArithmeticOp int
+
+const (
+	ArithmeticOpInvalid ArithmeticOp = 0
+
+	ArithmeticOpAdd ArithmeticOp = iota
+	ArithmeticOpSub
+	ArithmeticOpMul
+	ArithmeticOpDiv
+	ArithmeticOpMod
+
+	ArithmeticOpLogicalAnd
+	ArithmeticOpLogicalOr
+
+	ArithmeticOpEqual
+	ArithmeticOpNotEqual
+	ArithmeticOpLessThan
+	ArithmeticOpLessThanOrEqual
+	ArithmeticOpGreaterThan
+	ArithmeticOpGreaterThanOrEqual
+)
diff --git a/vendor/github.com/hashicorp/hil/ast/ast.go b/vendor/github.com/hashicorp/hil/ast/ast.go
new file mode 100644
index 00000000..c6350f8b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/ast.go
@@ -0,0 +1,99 @@
+package ast
+
+import (
+	"fmt"
+)
+
+// Node is the interface that all AST nodes must implement.
+type Node interface {
+	// Accept is called to dispatch to the visitors. It must return the
+	// resulting Node (which might be different in an AST transform).
+	Accept(Visitor) Node
+
+	// Pos returns the position of this node in some source.
+	Pos() Pos
+
+	// Type returns the type of this node for the given context.
+	Type(Scope) (Type, error)
+}
+
+// Pos is the starting position of an AST node
+type Pos struct {
+	Column, Line int    // Column/Line number, starting at 1
+	Filename     string // Optional source filename, if known
+}
+
+func (p Pos) String() string {
+	if p.Filename == "" {
+		return fmt.Sprintf("%d:%d", p.Line, p.Column)
+	} else {
+		return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column)
+	}
+}
+
+// InitPos is an initial position value. This should be used as
+// the starting position (presets the column and line to 1).
+var InitPos = Pos{Column: 1, Line: 1}
+
+// Visitors are just implementations of this function.
+//
+// The function must return the Node to replace this node with. "nil" is
+// _not_ a valid return value. If there is no replacement, the original node
+// should be returned. We build this replacement directly into the visitor
+// pattern since AST transformations are a common and useful tool and
+// building it into the AST itself makes it required for future Node
+// implementations and very easy to do.
+//
+// Note that this isn't a true implementation of the visitor pattern, which
+// generally requires proper type dispatch on the function. However,
+// implementing this basic visitor pattern style is still very useful even
+// if you have to type switch.
+type Visitor func(Node) Node
+
+//go:generate stringer -type=Type
+
+// Type is the type of any value.
+type Type uint32
+
+const (
+	TypeInvalid Type = 0
+	TypeAny     Type = 1 << iota
+	TypeBool
+	TypeString
+	TypeInt
+	TypeFloat
+	TypeList
+	TypeMap
+
+	// This is a special type used by Terraform to mark "unknown" values.
+	// It is impossible for this type to be introduced into your HIL programs
+	// unless you explicitly set a variable to this value. In that case,
+	// any operation including the variable will return "TypeUnknown" as the
+	// type.
+	TypeUnknown
+)
+
+func (t Type) Printable() string {
+	switch t {
+	case TypeInvalid:
+		return "invalid type"
+	case TypeAny:
+		return "any type"
+	case TypeBool:
+		return "type bool"
+	case TypeString:
+		return "type string"
+	case TypeInt:
+		return "type int"
+	case TypeFloat:
+		return "type float"
+	case TypeList:
+		return "type list"
+	case TypeMap:
+		return "type map"
+	case TypeUnknown:
+		return "type unknown"
+	default:
+		return "unknown type"
+	}
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/call.go b/vendor/github.com/hashicorp/hil/ast/call.go
new file mode 100644
index 00000000..05570110
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/call.go
@@ -0,0 +1,47 @@
+package ast
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Call represents a function call.
+type Call struct {
+	Func string
+	Args []Node
+	Posx Pos
+}
+
+func (n *Call) Accept(v Visitor) Node {
+	for i, a := range n.Args {
+		n.Args[i] = a.Accept(v)
+	}
+
+	return v(n)
+}
+
+func (n *Call) Pos() Pos {
+	return n.Posx
+}
+
+func (n *Call) String() string {
+	args := make([]string, len(n.Args))
+	for i, arg := range n.Args {
+		args[i] = fmt.Sprintf("%s", arg)
+	}
+
+	return fmt.Sprintf("Call(%s, %s)", n.Func, strings.Join(args, ", "))
+}
+
+func (n *Call) Type(s Scope) (Type, error) {
+	f, ok := s.LookupFunc(n.Func)
+	if !ok {
+		return TypeInvalid, fmt.Errorf("unknown function: %s", n.Func)
+	}
+
+	return f.ReturnType, nil
+}
+
+func (n *Call) GoString() string {
+	return fmt.Sprintf("*%#v", *n)
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/index.go b/vendor/github.com/hashicorp/hil/ast/index.go
new file mode 100644
index 00000000..860c25fd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/index.go
@@ -0,0 +1,76 @@
+package ast
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Index represents an indexing operation into another data structure
+type Index struct {
+	Target Node
+	Key    Node
+	Posx   Pos
+}
+
+func (n *Index) Accept(v Visitor) Node {
+	n.Target = n.Target.Accept(v)
+	n.Key = n.Key.Accept(v)
+	return v(n)
+}
+
+func (n *Index) Pos() Pos {
+	return n.Posx
+}
+
+func (n *Index) String() string {
+	return fmt.Sprintf("Index(%s, %s)", n.Target, n.Key)
+}
+
+func (n *Index) Type(s Scope) (Type, error) {
+	variableAccess, ok := n.Target.(*VariableAccess)
+	if !ok {
+		return TypeInvalid, fmt.Errorf("target is not a variable")
+	}
+
+	variable, ok := s.LookupVar(variableAccess.Name)
+	if !ok {
+		return TypeInvalid, fmt.Errorf("unknown variable accessed: %s", variableAccess.Name)
+	}
+
+	switch variable.Type {
+	case TypeList:
+		return n.typeList(variable, variableAccess.Name)
+	case TypeMap:
+		return n.typeMap(variable, variableAccess.Name)
+	default:
+		return TypeInvalid, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type)
+	}
+}
+
+func (n *Index) typeList(variable Variable, variableName string) (Type, error) {
+	// We assume type checking has already determined that this is a list
+	list := variable.Value.([]Variable)
+
+	return VariableListElementTypesAreHomogenous(variableName, list)
+}
+
+func (n *Index) typeMap(variable Variable, variableName string) (Type, error) {
+	// We assume type checking has already determined that this is a map
+	vmap := variable.Value.(map[string]Variable)
+
+	return VariableMapValueTypesAreHomogenous(variableName, vmap)
+}
+
+func reportTypes(typesFound map[Type]struct{}) string {
+	stringTypes := make([]string, len(typesFound))
+	i := 0
+	for k := range typesFound {
+		// index by i (not a fixed 0) so every found type is reported
+		stringTypes[i] = k.String()
+		i++
+	}
+	return strings.Join(stringTypes, ", ")
") +} + +func (n *Index) GoString() string { + return fmt.Sprintf("*%#v", *n) +} diff --git a/vendor/github.com/hashicorp/hil/ast/literal.go b/vendor/github.com/hashicorp/hil/ast/literal.go new file mode 100644 index 00000000..da6014fe --- /dev/null +++ b/vendor/github.com/hashicorp/hil/ast/literal.go @@ -0,0 +1,88 @@ +package ast + +import ( + "fmt" + "reflect" +) + +// LiteralNode represents a single literal value, such as "foo" or +// 42 or 3.14159. Based on the Type, the Value can be safely cast. +type LiteralNode struct { + Value interface{} + Typex Type + Posx Pos +} + +// NewLiteralNode returns a new literal node representing the given +// literal Go value, which must correspond to one of the primitive types +// supported by HIL. Lists and maps cannot currently be constructed via +// this function. +// +// If an inappropriately-typed value is provided, this function will +// return an error. The main intended use of this function is to produce +// "synthetic" literals from constants in code, where the value type is +// well known at compile time. To easily store these in global variables, +// see also MustNewLiteralNode. +func NewLiteralNode(value interface{}, pos Pos) (*LiteralNode, error) { + goType := reflect.TypeOf(value) + var hilType Type + + switch goType.Kind() { + case reflect.Bool: + hilType = TypeBool + case reflect.Int: + hilType = TypeInt + case reflect.Float64: + hilType = TypeFloat + case reflect.String: + hilType = TypeString + default: + return nil, fmt.Errorf("unsupported literal node type: %T", value) + } + + return &LiteralNode{ + Value: value, + Typex: hilType, + Posx: pos, + }, nil +} + +// MustNewLiteralNode wraps NewLiteralNode and panics if an error is +// returned, thus allowing valid literal nodes to be easily assigned to +// global variables. +func MustNewLiteralNode(value interface{}, pos Pos) *LiteralNode { + node, err := NewLiteralNode(value, pos) + if err != nil { + panic(err) + } + return node +} + +func (n *LiteralNode) Accept(v Visitor) Node { + return v(n) +} + +func (n *LiteralNode) Pos() Pos { + return n.Posx +} + +func (n *LiteralNode) GoString() string { + return fmt.Sprintf("*%#v", *n) +} + +func (n *LiteralNode) String() string { + return fmt.Sprintf("Literal(%s, %v)", n.Typex, n.Value) +} + +func (n *LiteralNode) Type(Scope) (Type, error) { + return n.Typex, nil +} + +// IsUnknown returns true either if the node's value is itself unknown +// of if it is a collection containing any unknown elements, deeply. +func (n *LiteralNode) IsUnknown() bool { + return IsUnknown(Variable{ + Type: n.Typex, + Value: n.Value, + }) +} diff --git a/vendor/github.com/hashicorp/hil/ast/output.go b/vendor/github.com/hashicorp/hil/ast/output.go new file mode 100644 index 00000000..1e27f970 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/ast/output.go @@ -0,0 +1,78 @@ +package ast + +import ( + "bytes" + "fmt" +) + +// Output represents the root node of all interpolation evaluations. If the +// output only has one expression which is either a TypeList or TypeMap, the +// Output can be type-asserted to []interface{} or map[string]interface{} +// respectively. Otherwise the Output evaluates as a string, and concatenates +// the evaluation of each expression. 
+type Output struct {
+	Exprs []Node
+	Posx  Pos
+}
+
+func (n *Output) Accept(v Visitor) Node {
+	for i, expr := range n.Exprs {
+		n.Exprs[i] = expr.Accept(v)
+	}
+
+	return v(n)
+}
+
+func (n *Output) Pos() Pos {
+	return n.Posx
+}
+
+func (n *Output) GoString() string {
+	return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *Output) String() string {
+	var b bytes.Buffer
+	for _, expr := range n.Exprs {
+		b.WriteString(fmt.Sprintf("%s", expr))
+	}
+
+	return b.String()
+}
+
+func (n *Output) Type(s Scope) (Type, error) {
+	// Special case no expressions for backward compatibility
+	if len(n.Exprs) == 0 {
+		return TypeString, nil
+	}
+
+	// Special case a single expression of types list or map
+	if len(n.Exprs) == 1 {
+		exprType, err := n.Exprs[0].Type(s)
+		if err != nil {
+			return TypeInvalid, err
+		}
+		switch exprType {
+		case TypeList:
+			return TypeList, nil
+		case TypeMap:
+			return TypeMap, nil
+		}
+	}
+
+	// Otherwise ensure all our expressions are strings
+	for index, expr := range n.Exprs {
+		exprType, err := expr.Type(s)
+		if err != nil {
+			return TypeInvalid, err
+		}
+		// We only look for things we know we can't coerce with an implicit conversion func
+		if exprType == TypeList || exprType == TypeMap {
+			return TypeInvalid, fmt.Errorf(
+				"multi-expression HIL outputs may only have string inputs: %d is type %s",
+				index, exprType)
+		}
+	}
+
+	return TypeString, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/scope.go b/vendor/github.com/hashicorp/hil/ast/scope.go
new file mode 100644
index 00000000..7a975d99
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/scope.go
@@ -0,0 +1,90 @@
+package ast
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Scope is the interface used to look up variables and functions while
+// evaluating. How these functions/variables are defined are up to the caller.
+type Scope interface {
+	LookupFunc(string) (Function, bool)
+	LookupVar(string) (Variable, bool)
+}
+
+// Variable is a variable value for execution given as input to the engine.
+// It records the value of a variable along with its type.
+type Variable struct {
+	Value interface{}
+	Type  Type
+}
+
+// NewVariable creates a new Variable for the given value. This will
+// attempt to infer the correct type. If it can't, an error will be returned.
+func NewVariable(v interface{}) (result Variable, err error) {
+	switch v := reflect.ValueOf(v); v.Kind() {
+	case reflect.String:
+		result.Type = TypeString
+	default:
+		err = fmt.Errorf("Unknown type: %s", v.Kind())
+	}
+
+	result.Value = v
+	return
+}
+
+// String implements Stringer on Variable, displaying the type and value
+// of the Variable.
+func (v Variable) String() string {
+	return fmt.Sprintf("{Variable (%s): %+v}", v.Type, v.Value)
+}
+
+// Function defines a function that can be executed by the engine.
+// The type checker will validate that the proper types are passed
+// to the callback.
+type Function struct {
+	// ArgTypes is the list of types in argument order. These are the
+	// required arguments.
+	//
+	// ReturnType is the type of the returned value. The Callback MUST
+	// return this type.
+	ArgTypes   []Type
+	ReturnType Type
+
+	// Variadic, if true, says that this function is variadic, meaning
+	// it takes a variable number of arguments. In this case, the
+	// VariadicType must be set.
+	Variadic     bool
+	VariadicType Type
+
+	// Callback is the function called for a function. The argument
+	// types are guaranteed to match the spec above by the type checker.
+	// The length of the args is strictly == len(ArgTypes) unless Variadic
+	// is true, in which case it's >= len(ArgTypes).
+	Callback func([]interface{}) (interface{}, error)
+}
+
+// BasicScope is a simple scope that looks up variables and functions
+// using a map.
+type BasicScope struct {
+	FuncMap map[string]Function
+	VarMap  map[string]Variable
+}
+
+func (s *BasicScope) LookupFunc(n string) (Function, bool) {
+	if s == nil {
+		return Function{}, false
+	}
+
+	v, ok := s.FuncMap[n]
+	return v, ok
+}
+
+func (s *BasicScope) LookupVar(n string) (Variable, bool) {
+	if s == nil {
+		return Variable{}, false
+	}
+
+	v, ok := s.VarMap[n]
+	return v, ok
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/stack.go b/vendor/github.com/hashicorp/hil/ast/stack.go
new file mode 100644
index 00000000..bd2bc157
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/stack.go
@@ -0,0 +1,25 @@
+package ast
+
+// Stack is a stack of Node.
+type Stack struct {
+	stack []Node
+}
+
+func (s *Stack) Len() int {
+	return len(s.stack)
+}
+
+func (s *Stack) Push(n Node) {
+	s.stack = append(s.stack, n)
+}
+
+func (s *Stack) Pop() Node {
+	x := s.stack[len(s.stack)-1]
+	s.stack[len(s.stack)-1] = nil
+	s.stack = s.stack[:len(s.stack)-1]
+	return x
+}
+
+func (s *Stack) Reset() {
+	s.stack = nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/type_string.go b/vendor/github.com/hashicorp/hil/ast/type_string.go
new file mode 100644
index 00000000..1f51a98d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/type_string.go
@@ -0,0 +1,54 @@
+// Code generated by "stringer -type=Type"; DO NOT EDIT
+
+package ast
+
+import "fmt"
+
+const (
+	_Type_name_0 = "TypeInvalid"
+	_Type_name_1 = "TypeAny"
+	_Type_name_2 = "TypeBool"
+	_Type_name_3 = "TypeString"
+	_Type_name_4 = "TypeInt"
+	_Type_name_5 = "TypeFloat"
+	_Type_name_6 = "TypeList"
+	_Type_name_7 = "TypeMap"
+	_Type_name_8 = "TypeUnknown"
+)
+
+var (
+	_Type_index_0 = [...]uint8{0, 11}
+	_Type_index_1 = [...]uint8{0, 7}
+	_Type_index_2 = [...]uint8{0, 8}
+	_Type_index_3 = [...]uint8{0, 10}
+	_Type_index_4 = [...]uint8{0, 7}
+	_Type_index_5 = [...]uint8{0, 9}
+	_Type_index_6 = [...]uint8{0, 8}
+	_Type_index_7 = [...]uint8{0, 7}
+	_Type_index_8 = [...]uint8{0, 11}
+)
+
+func (i Type) String() string {
+	switch {
+	case i == 0:
+		return _Type_name_0
+	case i == 2:
+		return _Type_name_1
+	case i == 4:
+		return _Type_name_2
+	case i == 8:
+		return _Type_name_3
+	case i == 16:
+		return _Type_name_4
+	case i == 32:
+		return _Type_name_5
+	case i == 64:
+		return _Type_name_6
+	case i == 128:
+		return _Type_name_7
+	case i == 256:
+		return _Type_name_8
+	default:
+		return fmt.Sprintf("Type(%d)", i)
+	}
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/variable_access.go b/vendor/github.com/hashicorp/hil/ast/variable_access.go
new file mode 100644
index 00000000..4c1362d7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/variable_access.go
@@ -0,0 +1,36 @@
+package ast
+
+import (
+	"fmt"
+)
+
+// VariableAccess represents a variable access.
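+//
+// For example (illustrative, not upstream documentation): parsing the
+// interpolation "${foo}" produces a VariableAccess whose Name is "foo".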
+type VariableAccess struct { + Name string + Posx Pos +} + +func (n *VariableAccess) Accept(v Visitor) Node { + return v(n) +} + +func (n *VariableAccess) Pos() Pos { + return n.Posx +} + +func (n *VariableAccess) GoString() string { + return fmt.Sprintf("*%#v", *n) +} + +func (n *VariableAccess) String() string { + return fmt.Sprintf("Variable(%s)", n.Name) +} + +func (n *VariableAccess) Type(s Scope) (Type, error) { + v, ok := s.LookupVar(n.Name) + if !ok { + return TypeInvalid, fmt.Errorf("unknown variable: %s", n.Name) + } + + return v.Type, nil +} diff --git a/vendor/github.com/hashicorp/hil/ast/variables_helper.go b/vendor/github.com/hashicorp/hil/ast/variables_helper.go new file mode 100644 index 00000000..06bd18de --- /dev/null +++ b/vendor/github.com/hashicorp/hil/ast/variables_helper.go @@ -0,0 +1,63 @@ +package ast + +import "fmt" + +func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) { + if len(list) == 0 { + return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName) + } + + elemType := TypeUnknown + for _, v := range list { + if v.Type == TypeUnknown { + continue + } + + if elemType == TypeUnknown { + elemType = v.Type + continue + } + + if v.Type != elemType { + return TypeInvalid, fmt.Errorf( + "list %q does not have homogenous types. found %s and then %s", + variableName, + elemType, v.Type, + ) + } + + elemType = v.Type + } + + return elemType, nil +} + +func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) { + if len(vmap) == 0 { + return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName) + } + + elemType := TypeUnknown + for _, v := range vmap { + if v.Type == TypeUnknown { + continue + } + + if elemType == TypeUnknown { + elemType = v.Type + continue + } + + if v.Type != elemType { + return TypeInvalid, fmt.Errorf( + "map %q does not have homogenous types. 
found %s and then %s", + variableName, + elemType, v.Type, + ) + } + + elemType = v.Type + } + + return elemType, nil +} diff --git a/vendor/github.com/hashicorp/hil/builtins.go b/vendor/github.com/hashicorp/hil/builtins.go new file mode 100644 index 00000000..909c788a --- /dev/null +++ b/vendor/github.com/hashicorp/hil/builtins.go @@ -0,0 +1,331 @@ +package hil + +import ( + "errors" + "strconv" + + "github.com/hashicorp/hil/ast" +) + +// NOTE: All builtins are tested in engine_test.go + +func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope { + if scope == nil { + scope = new(ast.BasicScope) + } + if scope.FuncMap == nil { + scope.FuncMap = make(map[string]ast.Function) + } + + // Implicit conversions + scope.FuncMap["__builtin_BoolToString"] = builtinBoolToString() + scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt() + scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString() + scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat() + scope.FuncMap["__builtin_IntToString"] = builtinIntToString() + scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt() + scope.FuncMap["__builtin_StringToFloat"] = builtinStringToFloat() + scope.FuncMap["__builtin_StringToBool"] = builtinStringToBool() + + // Math operations + scope.FuncMap["__builtin_IntMath"] = builtinIntMath() + scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath() + scope.FuncMap["__builtin_BoolCompare"] = builtinBoolCompare() + scope.FuncMap["__builtin_FloatCompare"] = builtinFloatCompare() + scope.FuncMap["__builtin_IntCompare"] = builtinIntCompare() + scope.FuncMap["__builtin_StringCompare"] = builtinStringCompare() + scope.FuncMap["__builtin_Logical"] = builtinLogical() + return scope +} + +func builtinFloatMath() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt}, + Variadic: true, + VariadicType: ast.TypeFloat, + ReturnType: ast.TypeFloat, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + result := args[1].(float64) + for _, raw := range args[2:] { + arg := raw.(float64) + switch op { + case ast.ArithmeticOpAdd: + result += arg + case ast.ArithmeticOpSub: + result -= arg + case ast.ArithmeticOpMul: + result *= arg + case ast.ArithmeticOpDiv: + result /= arg + } + } + + return result, nil + }, + } +} + +func builtinIntMath() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt}, + Variadic: true, + VariadicType: ast.TypeInt, + ReturnType: ast.TypeInt, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + result := args[1].(int) + for _, raw := range args[2:] { + arg := raw.(int) + switch op { + case ast.ArithmeticOpAdd: + result += arg + case ast.ArithmeticOpSub: + result -= arg + case ast.ArithmeticOpMul: + result *= arg + case ast.ArithmeticOpDiv: + if arg == 0 { + return nil, errors.New("divide by zero") + } + + result /= arg + case ast.ArithmeticOpMod: + if arg == 0 { + return nil, errors.New("divide by zero") + } + + result = result % arg + } + } + + return result, nil + }, + } +} + +func builtinBoolCompare() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt, ast.TypeBool, ast.TypeBool}, + Variadic: false, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + lhs := args[1].(bool) + rhs := args[2].(bool) + + switch op { + case ast.ArithmeticOpEqual: + return lhs == rhs, nil + case ast.ArithmeticOpNotEqual: + return lhs != rhs, nil + default: + return nil, 
errors.New("invalid comparison operation") + } + }, + } +} + +func builtinFloatCompare() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt, ast.TypeFloat, ast.TypeFloat}, + Variadic: false, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + lhs := args[1].(float64) + rhs := args[2].(float64) + + switch op { + case ast.ArithmeticOpEqual: + return lhs == rhs, nil + case ast.ArithmeticOpNotEqual: + return lhs != rhs, nil + case ast.ArithmeticOpLessThan: + return lhs < rhs, nil + case ast.ArithmeticOpLessThanOrEqual: + return lhs <= rhs, nil + case ast.ArithmeticOpGreaterThan: + return lhs > rhs, nil + case ast.ArithmeticOpGreaterThanOrEqual: + return lhs >= rhs, nil + default: + return nil, errors.New("invalid comparison operation") + } + }, + } +} + +func builtinIntCompare() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt, ast.TypeInt, ast.TypeInt}, + Variadic: false, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + lhs := args[1].(int) + rhs := args[2].(int) + + switch op { + case ast.ArithmeticOpEqual: + return lhs == rhs, nil + case ast.ArithmeticOpNotEqual: + return lhs != rhs, nil + case ast.ArithmeticOpLessThan: + return lhs < rhs, nil + case ast.ArithmeticOpLessThanOrEqual: + return lhs <= rhs, nil + case ast.ArithmeticOpGreaterThan: + return lhs > rhs, nil + case ast.ArithmeticOpGreaterThanOrEqual: + return lhs >= rhs, nil + default: + return nil, errors.New("invalid comparison operation") + } + }, + } +} + +func builtinStringCompare() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString, ast.TypeString}, + Variadic: false, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + lhs := args[1].(string) + rhs := args[2].(string) + + switch op { + case ast.ArithmeticOpEqual: + return lhs == rhs, nil + case ast.ArithmeticOpNotEqual: + return lhs != rhs, nil + default: + return nil, errors.New("invalid comparison operation") + } + }, + } +} + +func builtinLogical() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt}, + Variadic: true, + VariadicType: ast.TypeBool, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + result := args[1].(bool) + for _, raw := range args[2:] { + arg := raw.(bool) + switch op { + case ast.ArithmeticOpLogicalOr: + result = result || arg + case ast.ArithmeticOpLogicalAnd: + result = result && arg + default: + return nil, errors.New("invalid logical operator") + } + } + + return result, nil + }, + } +} + +func builtinFloatToInt() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeFloat}, + ReturnType: ast.TypeInt, + Callback: func(args []interface{}) (interface{}, error) { + return int(args[0].(float64)), nil + }, + } +} + +func builtinFloatToString() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeFloat}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + return strconv.FormatFloat( + args[0].(float64), 'g', -1, 64), nil + }, + } +} + +func builtinIntToFloat() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt}, + ReturnType: ast.TypeFloat, + Callback: func(args []interface{}) (interface{}, error) { + return float64(args[0].(int)), nil + }, + } +} + +func 
builtinIntToString() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeInt},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return strconv.FormatInt(int64(args[0].(int)), 10), nil
+		},
+	}
+}
+
+func builtinStringToInt() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeInt,
+		Callback: func(args []interface{}) (interface{}, error) {
+			v, err := strconv.ParseInt(args[0].(string), 0, 0)
+			if err != nil {
+				return nil, err
+			}
+
+			return int(v), nil
+		},
+	}
+}
+
+func builtinStringToFloat() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeFloat,
+		Callback: func(args []interface{}) (interface{}, error) {
+			v, err := strconv.ParseFloat(args[0].(string), 64)
+			if err != nil {
+				return nil, err
+			}
+
+			return v, nil
+		},
+	}
+}
+
+func builtinBoolToString() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeBool},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return strconv.FormatBool(args[0].(bool)), nil
+		},
+	}
+}
+
+func builtinStringToBool() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeBool,
+		Callback: func(args []interface{}) (interface{}, error) {
+			v, err := strconv.ParseBool(args[0].(string))
+			if err != nil {
+				return nil, err
+			}
+
+			return v, nil
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/hil/check_identifier.go b/vendor/github.com/hashicorp/hil/check_identifier.go
new file mode 100644
index 00000000..474f5058
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/check_identifier.go
@@ -0,0 +1,88 @@
+package hil
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/hashicorp/hil/ast"
+)
+
+// IdentifierCheck is a SemanticCheck that checks that all identifiers
+// resolve properly and that the right number of arguments are passed
+// to functions.
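+//
+// A minimal usage sketch (assuming a populated ast.BasicScope named
+// scope and a parsed root node; illustrative only):
+//
+//	check := &IdentifierCheck{Scope: scope}
+//	if err := check.Visit(root); err != nil {
+//		// an unknown identifier or wrong argument count was found
+//	}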
+type IdentifierCheck struct { + Scope ast.Scope + + err error + lock sync.Mutex +} + +func (c *IdentifierCheck) Visit(root ast.Node) error { + c.lock.Lock() + defer c.lock.Unlock() + defer c.reset() + root.Accept(c.visit) + return c.err +} + +func (c *IdentifierCheck) visit(raw ast.Node) ast.Node { + if c.err != nil { + return raw + } + + switch n := raw.(type) { + case *ast.Call: + c.visitCall(n) + case *ast.VariableAccess: + c.visitVariableAccess(n) + case *ast.Output: + // Ignore + case *ast.LiteralNode: + // Ignore + default: + // Ignore + } + + // We never do replacement with this visitor + return raw +} + +func (c *IdentifierCheck) visitCall(n *ast.Call) { + // Look up the function in the map + function, ok := c.Scope.LookupFunc(n.Func) + if !ok { + c.createErr(n, fmt.Sprintf("unknown function called: %s", n.Func)) + return + } + + // Break up the args into what is variadic and what is required + args := n.Args + if function.Variadic && len(args) > len(function.ArgTypes) { + args = n.Args[:len(function.ArgTypes)] + } + + // Verify the number of arguments + if len(args) != len(function.ArgTypes) { + c.createErr(n, fmt.Sprintf( + "%s: expected %d arguments, got %d", + n.Func, len(function.ArgTypes), len(n.Args))) + return + } +} + +func (c *IdentifierCheck) visitVariableAccess(n *ast.VariableAccess) { + // Look up the variable in the map + if _, ok := c.Scope.LookupVar(n.Name); !ok { + c.createErr(n, fmt.Sprintf( + "unknown variable accessed: %s", n.Name)) + return + } +} + +func (c *IdentifierCheck) createErr(n ast.Node, str string) { + c.err = fmt.Errorf("%s: %s", n.Pos(), str) +} + +func (c *IdentifierCheck) reset() { + c.err = nil +} diff --git a/vendor/github.com/hashicorp/hil/check_types.go b/vendor/github.com/hashicorp/hil/check_types.go new file mode 100644 index 00000000..f16da391 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/check_types.go @@ -0,0 +1,668 @@ +package hil + +import ( + "fmt" + "sync" + + "github.com/hashicorp/hil/ast" +) + +// TypeCheck implements ast.Visitor for type checking an AST tree. +// It requires some configuration to look up the type of nodes. +// +// It also optionally will not type error and will insert an implicit +// type conversions for specific types if specified by the Implicit +// field. Note that this is kind of organizationally weird to put into +// this structure but we'd rather do that than duplicate the type checking +// logic multiple times. +type TypeCheck struct { + Scope ast.Scope + + // Implicit is a map of implicit type conversions that we can do, + // and that shouldn't error. The key of the first map is the from type, + // the key of the second map is the to type, and the final string + // value is the function to call (which must be registered in the Scope). + Implicit map[ast.Type]map[ast.Type]string + + // Stack of types. This shouldn't be used directly except by implementations + // of TypeCheckNode. + Stack []ast.Type + + err error + lock sync.Mutex +} + +// TypeCheckNode is the interface that must be implemented by any +// ast.Node that wants to support type-checking. If the type checker +// encounters a node that doesn't implement this, it will error. +type TypeCheckNode interface { + TypeCheck(*TypeCheck) (ast.Node, error) +} + +func (v *TypeCheck) Visit(root ast.Node) error { + v.lock.Lock() + defer v.lock.Unlock() + defer v.reset() + root.Accept(v.visit) + + // If the resulting type is unknown, then just let the whole thing go. 
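+	// (errExitUnknown is the sentinel error defined in eval.go that is
+	// used as an early-exit mechanism when an unknown value is found.)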
+ if v.err == errExitUnknown { + v.err = nil + } + + return v.err +} + +func (v *TypeCheck) visit(raw ast.Node) ast.Node { + if v.err != nil { + return raw + } + + var result ast.Node + var err error + switch n := raw.(type) { + case *ast.Arithmetic: + tc := &typeCheckArithmetic{n} + result, err = tc.TypeCheck(v) + case *ast.Call: + tc := &typeCheckCall{n} + result, err = tc.TypeCheck(v) + case *ast.Conditional: + tc := &typeCheckConditional{n} + result, err = tc.TypeCheck(v) + case *ast.Index: + tc := &typeCheckIndex{n} + result, err = tc.TypeCheck(v) + case *ast.Output: + tc := &typeCheckOutput{n} + result, err = tc.TypeCheck(v) + case *ast.LiteralNode: + tc := &typeCheckLiteral{n} + result, err = tc.TypeCheck(v) + case *ast.VariableAccess: + tc := &typeCheckVariableAccess{n} + result, err = tc.TypeCheck(v) + default: + tc, ok := raw.(TypeCheckNode) + if !ok { + err = fmt.Errorf("unknown node for type check: %#v", raw) + break + } + + result, err = tc.TypeCheck(v) + } + + if err != nil { + pos := raw.Pos() + v.err = fmt.Errorf("At column %d, line %d: %s", + pos.Column, pos.Line, err) + } + + return result +} + +type typeCheckArithmetic struct { + n *ast.Arithmetic +} + +func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) { + // The arguments are on the stack in reverse order, so pop them off. + exprs := make([]ast.Type, len(tc.n.Exprs)) + for i, _ := range tc.n.Exprs { + exprs[len(tc.n.Exprs)-1-i] = v.StackPop() + } + + // If any operand is unknown then our result is automatically unknown + for _, ty := range exprs { + if ty == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + } + + switch tc.n.Op { + case ast.ArithmeticOpLogicalAnd, ast.ArithmeticOpLogicalOr: + return tc.checkLogical(v, exprs) + case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual, + ast.ArithmeticOpLessThan, ast.ArithmeticOpGreaterThan, + ast.ArithmeticOpGreaterThanOrEqual, ast.ArithmeticOpLessThanOrEqual: + return tc.checkComparison(v, exprs) + default: + return tc.checkNumeric(v, exprs) + } + +} + +func (tc *typeCheckArithmetic) checkNumeric(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { + // Determine the resulting type we want. We do this by going over + // every expression until we find one with a type we recognize. + // We do this because the first expr might be a string ("var.foo") + // and we need to know what to implicit to. + mathFunc := "__builtin_IntMath" + mathType := ast.TypeInt + for _, v := range exprs { + // We assume int math but if we find ANY float, the entire + // expression turns into floating point math. + if v == ast.TypeFloat { + mathFunc = "__builtin_FloatMath" + mathType = v + break + } + } + + // Verify the args + for i, arg := range exprs { + if arg != mathType { + cn := v.ImplicitConversion(exprs[i], mathType, tc.n.Exprs[i]) + if cn != nil { + tc.n.Exprs[i] = cn + continue + } + + return nil, fmt.Errorf( + "operand %d should be %s, got %s", + i+1, mathType, arg) + } + } + + // Modulo doesn't work for floats + if mathType == ast.TypeFloat && tc.n.Op == ast.ArithmeticOpMod { + return nil, fmt.Errorf("modulo cannot be used with floats") + } + + // Return type + v.StackPush(mathType) + + // Replace our node with a call to the proper function. This isn't + // type checked but we already verified types. 
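+	// For example (illustrative): the arithmetic node for 1 + 2 becomes
+	// the call __builtin_IntMath(ArithmeticOpAdd, 1, 2).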
+ args := make([]ast.Node, len(tc.n.Exprs)+1) + args[0] = &ast.LiteralNode{ + Value: tc.n.Op, + Typex: ast.TypeInt, + Posx: tc.n.Pos(), + } + copy(args[1:], tc.n.Exprs) + return &ast.Call{ + Func: mathFunc, + Args: args, + Posx: tc.n.Pos(), + }, nil +} + +func (tc *typeCheckArithmetic) checkComparison(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { + if len(exprs) != 2 { + // This should never happen, because the parser never produces + // nodes that violate this. + return nil, fmt.Errorf( + "comparison operators must have exactly two operands", + ) + } + + // The first operand always dictates the type for a comparison. + compareFunc := "" + compareType := exprs[0] + switch compareType { + case ast.TypeBool: + compareFunc = "__builtin_BoolCompare" + case ast.TypeFloat: + compareFunc = "__builtin_FloatCompare" + case ast.TypeInt: + compareFunc = "__builtin_IntCompare" + case ast.TypeString: + compareFunc = "__builtin_StringCompare" + default: + return nil, fmt.Errorf( + "comparison operators apply only to bool, float, int, and string", + ) + } + + // For non-equality comparisons, we will do implicit conversions to + // integer types if possible. In this case, we need to go through and + // determine the type of comparison we're doing to enable the implicit + // conversion. + if tc.n.Op != ast.ArithmeticOpEqual && tc.n.Op != ast.ArithmeticOpNotEqual { + compareFunc = "__builtin_IntCompare" + compareType = ast.TypeInt + for _, expr := range exprs { + if expr == ast.TypeFloat { + compareFunc = "__builtin_FloatCompare" + compareType = ast.TypeFloat + break + } + } + } + + // Verify (and possibly, convert) the args + for i, arg := range exprs { + if arg != compareType { + cn := v.ImplicitConversion(exprs[i], compareType, tc.n.Exprs[i]) + if cn != nil { + tc.n.Exprs[i] = cn + continue + } + + return nil, fmt.Errorf( + "operand %d should be %s, got %s", + i+1, compareType, arg, + ) + } + } + + // Only ints and floats can have the <, >, <= and >= operators applied + switch tc.n.Op { + case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual: + // anything goes + default: + switch compareType { + case ast.TypeFloat, ast.TypeInt: + // fine + default: + return nil, fmt.Errorf( + "<, >, <= and >= may apply only to int and float values", + ) + } + } + + // Comparison operators always return bool + v.StackPush(ast.TypeBool) + + // Replace our node with a call to the proper function. This isn't + // type checked but we already verified types. 
+ args := make([]ast.Node, len(tc.n.Exprs)+1) + args[0] = &ast.LiteralNode{ + Value: tc.n.Op, + Typex: ast.TypeInt, + Posx: tc.n.Pos(), + } + copy(args[1:], tc.n.Exprs) + return &ast.Call{ + Func: compareFunc, + Args: args, + Posx: tc.n.Pos(), + }, nil +} + +func (tc *typeCheckArithmetic) checkLogical(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { + for i, t := range exprs { + if t != ast.TypeBool { + cn := v.ImplicitConversion(t, ast.TypeBool, tc.n.Exprs[i]) + if cn == nil { + return nil, fmt.Errorf( + "logical operators require boolean operands, not %s", + t, + ) + } + tc.n.Exprs[i] = cn + } + } + + // Return type is always boolean + v.StackPush(ast.TypeBool) + + // Arithmetic nodes are replaced with a call to a built-in function + args := make([]ast.Node, len(tc.n.Exprs)+1) + args[0] = &ast.LiteralNode{ + Value: tc.n.Op, + Typex: ast.TypeInt, + Posx: tc.n.Pos(), + } + copy(args[1:], tc.n.Exprs) + return &ast.Call{ + Func: "__builtin_Logical", + Args: args, + Posx: tc.n.Pos(), + }, nil +} + +type typeCheckCall struct { + n *ast.Call +} + +func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) { + // Look up the function in the map + function, ok := v.Scope.LookupFunc(tc.n.Func) + if !ok { + return nil, fmt.Errorf("unknown function called: %s", tc.n.Func) + } + + // The arguments are on the stack in reverse order, so pop them off. + args := make([]ast.Type, len(tc.n.Args)) + for i, _ := range tc.n.Args { + args[len(tc.n.Args)-1-i] = v.StackPop() + } + + // Verify the args + for i, expected := range function.ArgTypes { + if expected == ast.TypeAny { + continue + } + + if args[i] == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + + if args[i] != expected { + cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i]) + if cn != nil { + tc.n.Args[i] = cn + continue + } + + return nil, fmt.Errorf( + "%s: argument %d should be %s, got %s", + tc.n.Func, i+1, expected.Printable(), args[i].Printable()) + } + } + + // If we're variadic, then verify the types there + if function.Variadic && function.VariadicType != ast.TypeAny { + args = args[len(function.ArgTypes):] + for i, t := range args { + if t == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + + if t != function.VariadicType { + realI := i + len(function.ArgTypes) + cn := v.ImplicitConversion( + t, function.VariadicType, tc.n.Args[realI]) + if cn != nil { + tc.n.Args[realI] = cn + continue + } + + return nil, fmt.Errorf( + "%s: argument %d should be %s, got %s", + tc.n.Func, realI, + function.VariadicType.Printable(), t.Printable()) + } + } + } + + // Return type + v.StackPush(function.ReturnType) + + return tc.n, nil +} + +type typeCheckConditional struct { + n *ast.Conditional +} + +func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) { + // On the stack we have the types of the condition, true and false + // expressions, but they are in reverse order. 
+ falseType := v.StackPop() + trueType := v.StackPop() + condType := v.StackPop() + + if condType == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + + if condType != ast.TypeBool { + cn := v.ImplicitConversion(condType, ast.TypeBool, tc.n.CondExpr) + if cn == nil { + return nil, fmt.Errorf( + "condition must be type bool, not %s", condType.Printable(), + ) + } + tc.n.CondExpr = cn + } + + // The types of the true and false expression must match + if trueType != falseType && trueType != ast.TypeUnknown && falseType != ast.TypeUnknown { + + // Since passing around stringified versions of other types is + // common, we pragmatically allow the false expression to dictate + // the result type when the true expression is a string. + if trueType == ast.TypeString { + cn := v.ImplicitConversion(trueType, falseType, tc.n.TrueExpr) + if cn == nil { + return nil, fmt.Errorf( + "true and false expression types must match; have %s and %s", + trueType.Printable(), falseType.Printable(), + ) + } + tc.n.TrueExpr = cn + trueType = falseType + } else { + cn := v.ImplicitConversion(falseType, trueType, tc.n.FalseExpr) + if cn == nil { + return nil, fmt.Errorf( + "true and false expression types must match; have %s and %s", + trueType.Printable(), falseType.Printable(), + ) + } + tc.n.FalseExpr = cn + falseType = trueType + } + } + + // Currently list and map types cannot be used, because we cannot + // generally assert that their element types are consistent. + // Such support might be added later, either by improving the type + // system or restricting usage to only variable and literal expressions, + // but for now this is simply prohibited because it doesn't seem to + // be a common enough case to be worth the complexity. + switch trueType { + case ast.TypeList: + return nil, fmt.Errorf( + "conditional operator cannot be used with list values", + ) + case ast.TypeMap: + return nil, fmt.Errorf( + "conditional operator cannot be used with map values", + ) + } + + // Result type (guaranteed to also match falseType due to the above) + if trueType == ast.TypeUnknown { + // falseType may also be unknown, but that's okay because two + // unknowns means our result is unknown anyway. 
+ v.StackPush(falseType) + } else { + v.StackPush(trueType) + } + + return tc.n, nil +} + +type typeCheckOutput struct { + n *ast.Output +} + +func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) { + n := tc.n + types := make([]ast.Type, len(n.Exprs)) + for i, _ := range n.Exprs { + types[len(n.Exprs)-1-i] = v.StackPop() + } + + for _, ty := range types { + if ty == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + } + + // If there is only one argument and it is a list, we evaluate to a list + if len(types) == 1 { + switch t := types[0]; t { + case ast.TypeList: + fallthrough + case ast.TypeMap: + v.StackPush(t) + return n, nil + } + } + + // Otherwise, all concat args must be strings, so validate that + resultType := ast.TypeString + for i, t := range types { + + if t == ast.TypeUnknown { + resultType = ast.TypeUnknown + continue + } + + if t != ast.TypeString { + cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i]) + if cn != nil { + n.Exprs[i] = cn + continue + } + + return nil, fmt.Errorf( + "output of an HIL expression must be a string, or a single list (argument %d is %s)", i+1, t) + } + } + + // This always results in type string, unless there are unknowns + v.StackPush(resultType) + + return n, nil +} + +type typeCheckLiteral struct { + n *ast.LiteralNode +} + +func (tc *typeCheckLiteral) TypeCheck(v *TypeCheck) (ast.Node, error) { + v.StackPush(tc.n.Typex) + return tc.n, nil +} + +type typeCheckVariableAccess struct { + n *ast.VariableAccess +} + +func (tc *typeCheckVariableAccess) TypeCheck(v *TypeCheck) (ast.Node, error) { + // Look up the variable in the map + variable, ok := v.Scope.LookupVar(tc.n.Name) + if !ok { + return nil, fmt.Errorf( + "unknown variable accessed: %s", tc.n.Name) + } + + // Add the type to the stack + v.StackPush(variable.Type) + + return tc.n, nil +} + +type typeCheckIndex struct { + n *ast.Index +} + +func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) { + keyType := v.StackPop() + targetType := v.StackPop() + + if keyType == ast.TypeUnknown || targetType == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + + // Ensure we have a VariableAccess as the target + varAccessNode, ok := tc.n.Target.(*ast.VariableAccess) + if !ok { + return nil, fmt.Errorf( + "target of an index must be a VariableAccess node, was %T", tc.n.Target) + } + + // Get the variable + variable, ok := v.Scope.LookupVar(varAccessNode.Name) + if !ok { + return nil, fmt.Errorf( + "unknown variable accessed: %s", varAccessNode.Name) + } + + switch targetType { + case ast.TypeList: + if keyType != ast.TypeInt { + tc.n.Key = v.ImplicitConversion(keyType, ast.TypeInt, tc.n.Key) + if tc.n.Key == nil { + return nil, fmt.Errorf( + "key of an index must be an int, was %s", keyType) + } + } + + valType, err := ast.VariableListElementTypesAreHomogenous( + varAccessNode.Name, variable.Value.([]ast.Variable)) + if err != nil { + return tc.n, err + } + + v.StackPush(valType) + return tc.n, nil + case ast.TypeMap: + if keyType != ast.TypeString { + tc.n.Key = v.ImplicitConversion(keyType, ast.TypeString, tc.n.Key) + if tc.n.Key == nil { + return nil, fmt.Errorf( + "key of an index must be a string, was %s", keyType) + } + } + + valType, err := ast.VariableMapValueTypesAreHomogenous( + varAccessNode.Name, variable.Value.(map[string]ast.Variable)) + if err != nil { + return tc.n, err + } + + v.StackPush(valType) + return tc.n, nil + default: + return nil, fmt.Errorf("invalid index operation into non-indexable type: %s", 
variable.Type) + } +} + +func (v *TypeCheck) ImplicitConversion( + actual ast.Type, expected ast.Type, n ast.Node) ast.Node { + if v.Implicit == nil { + return nil + } + + fromMap, ok := v.Implicit[actual] + if !ok { + return nil + } + + toFunc, ok := fromMap[expected] + if !ok { + return nil + } + + return &ast.Call{ + Func: toFunc, + Args: []ast.Node{n}, + Posx: n.Pos(), + } +} + +func (v *TypeCheck) reset() { + v.Stack = nil + v.err = nil +} + +func (v *TypeCheck) StackPush(t ast.Type) { + v.Stack = append(v.Stack, t) +} + +func (v *TypeCheck) StackPop() ast.Type { + var x ast.Type + x, v.Stack = v.Stack[len(v.Stack)-1], v.Stack[:len(v.Stack)-1] + return x +} + +func (v *TypeCheck) StackPeek() ast.Type { + if len(v.Stack) == 0 { + return ast.TypeInvalid + } + + return v.Stack[len(v.Stack)-1] +} diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go new file mode 100644 index 00000000..f2024d01 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/convert.go @@ -0,0 +1,159 @@ +package hil + +import ( + "fmt" + "reflect" + + "github.com/hashicorp/hil/ast" + "github.com/mitchellh/mapstructure" +) + +// UnknownValue is a sentinel value that can be used to denote +// that a value of a variable (or map element, list element, etc.) +// is unknown. This will always have the type ast.TypeUnknown. +const UnknownValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +var hilMapstructureDecodeHookSlice []interface{} +var hilMapstructureDecodeHookStringSlice []string +var hilMapstructureDecodeHookMap map[string]interface{} + +// hilMapstructureWeakDecode behaves in the same way as mapstructure.WeakDecode +// but has a DecodeHook which defeats the backward compatibility mode of mapstructure +// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This +// allows us to use WeakDecode (desirable), but not fail on empty lists. 
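+//
+// Illustrative note: the DecodeHook below returns an error for any
+// slice-to-map conversion, so an empty list falls through to the list
+// branch of InterfaceToVariable instead of decoding as an empty map.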
+func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error { + config := &mapstructure.DecoderConfig{ + DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) { + sliceType := reflect.TypeOf(hilMapstructureDecodeHookSlice) + stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice) + mapType := reflect.TypeOf(hilMapstructureDecodeHookMap) + + if (source == sliceType || source == stringSliceType) && target == mapType { + return nil, fmt.Errorf("Cannot convert %s into a %s", source, target) + } + + return val, nil + }, + WeaklyTypedInput: true, + Result: rawVal, + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(m) +} + +func InterfaceToVariable(input interface{}) (ast.Variable, error) { + if inputVariable, ok := input.(ast.Variable); ok { + return inputVariable, nil + } + + var stringVal string + if err := hilMapstructureWeakDecode(input, &stringVal); err == nil { + // Special case the unknown value to turn into "unknown" + if stringVal == UnknownValue { + return ast.Variable{Value: UnknownValue, Type: ast.TypeUnknown}, nil + } + + // Otherwise return the string value + return ast.Variable{ + Type: ast.TypeString, + Value: stringVal, + }, nil + } + + var mapVal map[string]interface{} + if err := hilMapstructureWeakDecode(input, &mapVal); err == nil { + elements := make(map[string]ast.Variable) + for i, element := range mapVal { + varElement, err := InterfaceToVariable(element) + if err != nil { + return ast.Variable{}, err + } + elements[i] = varElement + } + + return ast.Variable{ + Type: ast.TypeMap, + Value: elements, + }, nil + } + + var sliceVal []interface{} + if err := hilMapstructureWeakDecode(input, &sliceVal); err == nil { + elements := make([]ast.Variable, len(sliceVal)) + for i, element := range sliceVal { + varElement, err := InterfaceToVariable(element) + if err != nil { + return ast.Variable{}, err + } + elements[i] = varElement + } + + return ast.Variable{ + Type: ast.TypeList, + Value: elements, + }, nil + } + + return ast.Variable{}, fmt.Errorf("value for conversion must be a string, interface{} or map[string]interface: got %T", input) +} + +func VariableToInterface(input ast.Variable) (interface{}, error) { + if input.Type == ast.TypeString { + if inputStr, ok := input.Value.(string); ok { + return inputStr, nil + } else { + return nil, fmt.Errorf("ast.Variable with type string has value which is not a string") + } + } + + if input.Type == ast.TypeList { + inputList, ok := input.Value.([]ast.Variable) + if !ok { + return nil, fmt.Errorf("ast.Variable with type list has value which is not a []ast.Variable") + } + + result := make([]interface{}, 0) + if len(inputList) == 0 { + return result, nil + } + + for _, element := range inputList { + if convertedElement, err := VariableToInterface(element); err == nil { + result = append(result, convertedElement) + } else { + return nil, err + } + } + + return result, nil + } + + if input.Type == ast.TypeMap { + inputMap, ok := input.Value.(map[string]ast.Variable) + if !ok { + return nil, fmt.Errorf("ast.Variable with type map has value which is not a map[string]ast.Variable") + } + + result := make(map[string]interface{}, 0) + if len(inputMap) == 0 { + return result, nil + } + + for key, value := range inputMap { + if convertedValue, err := VariableToInterface(value); err == nil { + result[key] = convertedValue + } else { + return nil, err + } + } + + return result, nil + } + + return nil, 
fmt.Errorf("unknown input type: %s", input.Type)
+}
diff --git a/vendor/github.com/hashicorp/hil/eval.go b/vendor/github.com/hashicorp/hil/eval.go
new file mode 100644
index 00000000..27820769
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/eval.go
@@ -0,0 +1,472 @@
+package hil
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/hashicorp/hil/ast"
+)
+
+// EvalConfig is the configuration for evaluating.
+type EvalConfig struct {
+	// GlobalScope is the global scope of execution for evaluation.
+	GlobalScope *ast.BasicScope
+
+	// SemanticChecks is a list of additional semantic checks that will be run
+	// on the tree prior to evaluating it. The type checker, identifier checker,
+	// etc. will be run before these automatically.
+	SemanticChecks []SemanticChecker
+}
+
+// SemanticChecker is the type that must be implemented to do a
+// semantic check on an AST tree. This will be called with the root node.
+type SemanticChecker func(ast.Node) error
+
+// EvaluationResult is a struct returned from the hil.Eval function,
+// representing the result of an interpolation. Results are returned in their
+// "natural" Go structure rather than in terms of the HIL AST. For the types
+// currently implemented, this means that the Value field can be interpreted as
+// the following Go types:
+//	TypeInvalid: undefined
+//	TypeString:  string
+//	TypeList:    []interface{}
+//	TypeMap:     map[string]interface{}
+//	TypeBool:    bool
+type EvaluationResult struct {
+	Type  EvalType
+	Value interface{}
+}
+
+// InvalidResult is a structure representing the result of a HIL interpolation
+// which has invalid syntax, missing variables, or some other type of error.
+// The error is described out of band in the accompanying error return value.
+var InvalidResult = EvaluationResult{Type: TypeInvalid, Value: nil}
+
+// errExitUnknown is an internal error that when returned means the result
+// is an unknown value. We use this for early exit.
+var errExitUnknown = errors.New("unknown value")
+
+func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) {
+	output, outputType, err := internalEval(root, config)
+	if err != nil {
+		return InvalidResult, err
+	}
+
+	// If the result contains any nested unknowns then the result as a whole
+	// is unknown, so that callers only have to deal with "entirely known"
+	// or "entirely unknown" as outcomes.
+	if ast.IsUnknown(ast.Variable{Type: outputType, Value: output}) {
+		outputType = ast.TypeUnknown
+		output = UnknownValue
+	}
+
+	switch outputType {
+	case ast.TypeList:
+		val, err := VariableToInterface(ast.Variable{
+			Type:  ast.TypeList,
+			Value: output,
+		})
+		return EvaluationResult{
+			Type:  TypeList,
+			Value: val,
+		}, err
+	case ast.TypeMap:
+		val, err := VariableToInterface(ast.Variable{
+			Type:  ast.TypeMap,
+			Value: output,
+		})
+		return EvaluationResult{
+			Type:  TypeMap,
+			Value: val,
+		}, err
+	case ast.TypeString:
+		return EvaluationResult{
+			Type:  TypeString,
+			Value: output,
+		}, nil
+	case ast.TypeBool:
+		return EvaluationResult{
+			Type:  TypeBool,
+			Value: output,
+		}, nil
+	case ast.TypeUnknown:
+		return EvaluationResult{
+			Type:  TypeUnknown,
+			Value: UnknownValue,
+		}, nil
+	default:
+		return InvalidResult, fmt.Errorf("unknown type %s as interpolation output", outputType)
+	}
+}
+
+// internalEval evaluates the given AST tree and returns its output value,
+// the type of the output, and any error that occurred.
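+//
+// Callers normally go through the exported Eval; a minimal sketch
+// (assuming a scope defining a string variable "name"; illustrative only):
+//
+//	tree, err := hil.Parse("hello ${name}")
+//	if err != nil {
+//		// handle parse error
+//	}
+//	result, err := hil.Eval(tree, &hil.EvalConfig{GlobalScope: scope})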
+func internalEval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, error) { + // Copy the scope so we can add our builtins + if config == nil { + config = new(EvalConfig) + } + scope := registerBuiltins(config.GlobalScope) + implicitMap := map[ast.Type]map[ast.Type]string{ + ast.TypeFloat: { + ast.TypeInt: "__builtin_FloatToInt", + ast.TypeString: "__builtin_FloatToString", + }, + ast.TypeInt: { + ast.TypeFloat: "__builtin_IntToFloat", + ast.TypeString: "__builtin_IntToString", + }, + ast.TypeString: { + ast.TypeInt: "__builtin_StringToInt", + ast.TypeFloat: "__builtin_StringToFloat", + ast.TypeBool: "__builtin_StringToBool", + }, + ast.TypeBool: { + ast.TypeString: "__builtin_BoolToString", + }, + } + + // Build our own semantic checks that we always run + tv := &TypeCheck{Scope: scope, Implicit: implicitMap} + ic := &IdentifierCheck{Scope: scope} + + // Build up the semantic checks for execution + checks := make( + []SemanticChecker, + len(config.SemanticChecks), + len(config.SemanticChecks)+2) + copy(checks, config.SemanticChecks) + checks = append(checks, ic.Visit) + checks = append(checks, tv.Visit) + + // Run the semantic checks + for _, check := range checks { + if err := check(root); err != nil { + return nil, ast.TypeInvalid, err + } + } + + // Execute + v := &evalVisitor{Scope: scope} + return v.Visit(root) +} + +// EvalNode is the interface that must be implemented by any ast.Node +// to support evaluation. This will be called in visitor pattern order. +// The result of each call to Eval is automatically pushed onto the +// stack as a LiteralNode. Pop elements off the stack to get child +// values. +type EvalNode interface { + Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) +} + +type evalVisitor struct { + Scope ast.Scope + Stack ast.Stack + + err error + lock sync.Mutex +} + +func (v *evalVisitor) Visit(root ast.Node) (interface{}, ast.Type, error) { + // Run the actual visitor pattern + root.Accept(v.visit) + + // Get our result and clear out everything else + var result *ast.LiteralNode + if v.Stack.Len() > 0 { + result = v.Stack.Pop().(*ast.LiteralNode) + } else { + result = new(ast.LiteralNode) + } + resultErr := v.err + if resultErr == errExitUnknown { + // This means the return value is unknown and we used the error + // as an early exit mechanism. Reset since the value on the stack + // should be the unknown value. + resultErr = nil + } + + // Clear everything else so we aren't just dangling + v.Stack.Reset() + v.err = nil + + t, err := result.Type(v.Scope) + if err != nil { + return nil, ast.TypeInvalid, err + } + + return result.Value, t, resultErr +} + +func (v *evalVisitor) visit(raw ast.Node) ast.Node { + if v.err != nil { + return raw + } + + en, err := evalNode(raw) + if err != nil { + v.err = err + return raw + } + + out, outType, err := en.Eval(v.Scope, &v.Stack) + if err != nil { + v.err = err + return raw + } + + v.Stack.Push(&ast.LiteralNode{ + Value: out, + Typex: outType, + }) + + if outType == ast.TypeUnknown { + // Halt immediately + v.err = errExitUnknown + return raw + } + + return raw +} + +// evalNode is a private function that returns an EvalNode for built-in +// types as well as any other EvalNode implementations. 
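+//
+// Node types outside this built-in set can participate by implementing
+// the EvalNode interface themselves; any other node is rejected with an
+// error (see the default case below).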
+func evalNode(raw ast.Node) (EvalNode, error) {
+	switch n := raw.(type) {
+	case *ast.Index:
+		return &evalIndex{n}, nil
+	case *ast.Call:
+		return &evalCall{n}, nil
+	case *ast.Conditional:
+		return &evalConditional{n}, nil
+	case *ast.Output:
+		return &evalOutput{n}, nil
+	case *ast.LiteralNode:
+		return &evalLiteralNode{n}, nil
+	case *ast.VariableAccess:
+		return &evalVariableAccess{n}, nil
+	default:
+		en, ok := n.(EvalNode)
+		if !ok {
+			return nil, fmt.Errorf("node doesn't support evaluation: %#v", raw)
+		}
+
+		return en, nil
+	}
+}
+
+type evalCall struct{ *ast.Call }
+
+func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+	// Look up the function in the map
+	function, ok := s.LookupFunc(v.Func)
+	if !ok {
+		return nil, ast.TypeInvalid, fmt.Errorf(
+			"unknown function called: %s", v.Func)
+	}
+
+	// The arguments are on the stack in reverse order, so pop them off.
+	args := make([]interface{}, len(v.Args))
+	for i, _ := range v.Args {
+		node := stack.Pop().(*ast.LiteralNode)
+		if node.IsUnknown() {
+			// If any arguments are unknown then the result is automatically unknown
+			return UnknownValue, ast.TypeUnknown, nil
+		}
+		args[len(v.Args)-1-i] = node.Value
+	}
+
+	// Call the function
+	result, err := function.Callback(args)
+	if err != nil {
+		return nil, ast.TypeInvalid, fmt.Errorf("%s: %s", v.Func, err)
+	}
+
+	return result, function.ReturnType, nil
+}
+
+type evalConditional struct{ *ast.Conditional }
+
+func (v *evalConditional) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+	// On the stack we have literal nodes representing the resulting values
+	// of the condition, true and false expressions, but they are in reverse
+	// order.
+	falseLit := stack.Pop().(*ast.LiteralNode)
+	trueLit := stack.Pop().(*ast.LiteralNode)
+	condLit := stack.Pop().(*ast.LiteralNode)
+
+	if condLit.IsUnknown() {
+		// If our conditional is unknown then our result is also unknown
+		return UnknownValue, ast.TypeUnknown, nil
+	}
+
+	if condLit.Value.(bool) {
+		return trueLit.Value, trueLit.Typex, nil
+	} else {
+		return falseLit.Value, falseLit.Typex, nil
+	}
+}
+
+type evalIndex struct{ *ast.Index }
+
+func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+	key := stack.Pop().(*ast.LiteralNode)
+	target := stack.Pop().(*ast.LiteralNode)
+
+	variableName := v.Index.Target.(*ast.VariableAccess).Name
+
+	if key.IsUnknown() {
+		// If our key is unknown then our result is also unknown
+		return UnknownValue, ast.TypeUnknown, nil
+	}
+
+	// For target, we'll accept collections containing unknown values but
+	// we still need to catch when the collection itself is unknown, shallowly.
+ if target.Typex == ast.TypeUnknown { + return UnknownValue, ast.TypeUnknown, nil + } + + switch target.Typex { + case ast.TypeList: + return v.evalListIndex(variableName, target.Value, key.Value) + case ast.TypeMap: + return v.evalMapIndex(variableName, target.Value, key.Value) + default: + return nil, ast.TypeInvalid, fmt.Errorf( + "target %q for indexing must be ast.TypeList or ast.TypeMap, is %s", + variableName, target.Typex) + } +} + +func (v *evalIndex) evalListIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) { + // We assume type checking was already done and we can assume that target + // is a list and key is an int + list, ok := target.([]ast.Variable) + if !ok { + return nil, ast.TypeInvalid, fmt.Errorf( + "cannot cast target to []Variable, is: %T", target) + } + + keyInt, ok := key.(int) + if !ok { + return nil, ast.TypeInvalid, fmt.Errorf( + "cannot cast key to int, is: %T", key) + } + + if len(list) == 0 { + return nil, ast.TypeInvalid, fmt.Errorf("list is empty") + } + + if keyInt < 0 || len(list) < keyInt+1 { + return nil, ast.TypeInvalid, fmt.Errorf( + "index %d out of range for list %s (max %d)", + keyInt, variableName, len(list)) + } + + returnVal := list[keyInt].Value + returnType := list[keyInt].Type + return returnVal, returnType, nil +} + +func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) { + // We assume type checking was already done and we can assume that target + // is a map and key is a string + vmap, ok := target.(map[string]ast.Variable) + if !ok { + return nil, ast.TypeInvalid, fmt.Errorf( + "cannot cast target to map[string]Variable, is: %T", target) + } + + keyString, ok := key.(string) + if !ok { + return nil, ast.TypeInvalid, fmt.Errorf( + "cannot cast key to string, is: %T", key) + } + + if len(vmap) == 0 { + return nil, ast.TypeInvalid, fmt.Errorf("map is empty") + } + + value, ok := vmap[keyString] + if !ok { + return nil, ast.TypeInvalid, fmt.Errorf( + "key %q does not exist in map %s", keyString, variableName) + } + + return value.Value, value.Type, nil +} + +type evalOutput struct{ *ast.Output } + +func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { + // The expressions should all be on the stack in reverse + // order. So pop them off, reverse their order, and concatenate. + nodes := make([]*ast.LiteralNode, 0, len(v.Exprs)) + haveUnknown := false + for range v.Exprs { + n := stack.Pop().(*ast.LiteralNode) + nodes = append(nodes, n) + + // If we have any unknowns then the whole result is unknown + // (we must deal with this first, because the type checker can + // skip type conversions in the presence of unknowns, and thus + // any of our other nodes may be incorrectly typed.) 
+ if n.IsUnknown() { + haveUnknown = true + } + } + + if haveUnknown { + return UnknownValue, ast.TypeUnknown, nil + } + + // Special case the single list and map + if len(nodes) == 1 { + switch t := nodes[0].Typex; t { + case ast.TypeList: + fallthrough + case ast.TypeMap: + fallthrough + case ast.TypeUnknown: + return nodes[0].Value, t, nil + } + } + + // Otherwise concatenate the strings + var buf bytes.Buffer + for i := len(nodes) - 1; i >= 0; i-- { + if nodes[i].Typex != ast.TypeString { + return nil, ast.TypeInvalid, fmt.Errorf( + "invalid output with %s value at index %d: %#v", + nodes[i].Typex, + i, + nodes[i].Value, + ) + } + buf.WriteString(nodes[i].Value.(string)) + } + + return buf.String(), ast.TypeString, nil +} + +type evalLiteralNode struct{ *ast.LiteralNode } + +func (v *evalLiteralNode) Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) { + return v.Value, v.Typex, nil +} + +type evalVariableAccess struct{ *ast.VariableAccess } + +func (v *evalVariableAccess) Eval(scope ast.Scope, _ *ast.Stack) (interface{}, ast.Type, error) { + // Look up the variable in the map + variable, ok := scope.LookupVar(v.Name) + if !ok { + return nil, ast.TypeInvalid, fmt.Errorf( + "unknown variable accessed: %s", v.Name) + } + + return variable.Value, variable.Type, nil +} diff --git a/vendor/github.com/hashicorp/hil/eval_type.go b/vendor/github.com/hashicorp/hil/eval_type.go new file mode 100644 index 00000000..6946ecd2 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/eval_type.go @@ -0,0 +1,16 @@ +package hil + +//go:generate stringer -type=EvalType eval_type.go + +// EvalType represents the type of the output returned from a HIL +// evaluation. +type EvalType uint32 + +const ( + TypeInvalid EvalType = 0 + TypeString EvalType = 1 << iota + TypeBool + TypeList + TypeMap + TypeUnknown +) diff --git a/vendor/github.com/hashicorp/hil/evaltype_string.go b/vendor/github.com/hashicorp/hil/evaltype_string.go new file mode 100644 index 00000000..b107ddd4 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/evaltype_string.go @@ -0,0 +1,42 @@ +// Code generated by "stringer -type=EvalType eval_type.go"; DO NOT EDIT + +package hil + +import "fmt" + +const ( + _EvalType_name_0 = "TypeInvalid" + _EvalType_name_1 = "TypeString" + _EvalType_name_2 = "TypeBool" + _EvalType_name_3 = "TypeList" + _EvalType_name_4 = "TypeMap" + _EvalType_name_5 = "TypeUnknown" +) + +var ( + _EvalType_index_0 = [...]uint8{0, 11} + _EvalType_index_1 = [...]uint8{0, 10} + _EvalType_index_2 = [...]uint8{0, 8} + _EvalType_index_3 = [...]uint8{0, 8} + _EvalType_index_4 = [...]uint8{0, 7} + _EvalType_index_5 = [...]uint8{0, 11} +) + +func (i EvalType) String() string { + switch { + case i == 0: + return _EvalType_name_0 + case i == 2: + return _EvalType_name_1 + case i == 4: + return _EvalType_name_2 + case i == 8: + return _EvalType_name_3 + case i == 16: + return _EvalType_name_4 + case i == 32: + return _EvalType_name_5 + default: + return fmt.Sprintf("EvalType(%d)", i) + } +} diff --git a/vendor/github.com/hashicorp/hil/parse.go b/vendor/github.com/hashicorp/hil/parse.go new file mode 100644 index 00000000..ecbe1fdb --- /dev/null +++ b/vendor/github.com/hashicorp/hil/parse.go @@ -0,0 +1,29 @@ +package hil + +import ( + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/hil/parser" + "github.com/hashicorp/hil/scanner" +) + +// Parse parses the given program and returns an executable AST tree. 
+// +// Syntax errors are returned with error having the dynamic type +// *parser.ParseError, which gives the caller access to the source position +// where the error was found, which allows (for example) combining it with +// a known source filename to add context to the error message. +func Parse(v string) (ast.Node, error) { + return ParseWithPosition(v, ast.Pos{Line: 1, Column: 1}) +} + +// ParseWithPosition is like Parse except that it overrides the source +// row and column position of the first character in the string, which should +// be 1-based. +// +// This can be used when HIL is embedded in another language and the outer +// parser knows the row and column where the HIL expression started within +// the overall source file. +func ParseWithPosition(v string, pos ast.Pos) (ast.Node, error) { + ch := scanner.Scan(v, pos) + return parser.Parse(ch) +} diff --git a/vendor/github.com/hashicorp/hil/transform_fixed.go b/vendor/github.com/hashicorp/hil/transform_fixed.go new file mode 100644 index 00000000..e69df294 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/transform_fixed.go @@ -0,0 +1,29 @@ +package hil + +import ( + "github.com/hashicorp/hil/ast" +) + +// FixedValueTransform transforms an AST to return a fixed value for +// all interpolations. i.e. you can make "hi ${anything}" always +// turn into "hi foo". +// +// The primary use case for this is for config validations where you can +// verify that interpolations result in a certain type of string. +func FixedValueTransform(root ast.Node, Value *ast.LiteralNode) ast.Node { + // We visit the nodes in top-down order + result := root + switch n := result.(type) { + case *ast.Output: + for i, v := range n.Exprs { + n.Exprs[i] = FixedValueTransform(v, Value) + } + case *ast.LiteralNode: + // We keep it as-is + default: + // Anything else we replace + result = Value + } + + return result +} diff --git a/vendor/github.com/hashicorp/hil/walk.go b/vendor/github.com/hashicorp/hil/walk.go new file mode 100644 index 00000000..0ace8306 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/walk.go @@ -0,0 +1,266 @@ +package hil + +import ( + "fmt" + "reflect" + "strings" + + "github.com/hashicorp/hil/ast" + "github.com/mitchellh/reflectwalk" +) + +// WalkFn is the type of function to pass to Walk. Modify fields within +// WalkData to control whether replacement happens. +type WalkFn func(*WalkData) error + +// WalkData is the structure passed to the callback of the Walk function. +// +// This structure contains data passed in as well as fields that are expected +// to be written by the caller as a result. Please see the documentation for +// each field for more information. +type WalkData struct { + // Root is the parsed root of this HIL program + Root ast.Node + + // Location is the location within the structure where this + // value was found. This can be used to modify behavior within + // slices and so on. + Location reflectwalk.Location + + // The below two values must be set by the callback to have any effect. + // + // Replace, if true, will replace the value in the structure with + // ReplaceValue. It is up to the caller to make sure this is a string. + Replace bool + ReplaceValue string +} + +// Walk will walk an arbitrary Go structure and parse any string as an +// HIL program and call the callback cb to determine what to replace it +// with. +// +// This function is very useful for arbitrary HIL program interpolation +// across a complex configuration structure. 
Due to the heavy use of
+// reflection in this function, it is recommended to write many unit tests
+// with your typical configuration structures to help mitigate the risk
+// of panics.
+func Walk(v interface{}, cb WalkFn) error {
+	walker := &interpolationWalker{F: cb}
+	return reflectwalk.Walk(v, walker)
+}
+
+// interpolationWalker implements interfaces for the reflectwalk package
+// (github.com/mitchellh/reflectwalk) that can be used to automatically
+// execute a callback for an interpolation.
+type interpolationWalker struct {
+	F WalkFn
+
+	key         []string
+	lastValue   reflect.Value
+	loc         reflectwalk.Location
+	cs          []reflect.Value
+	csKey       []reflect.Value
+	csData      interface{}
+	sliceIndex  int
+	unknownKeys []string
+}
+
+func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
+	w.loc = loc
+	return nil
+}
+
+func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
+	w.loc = reflectwalk.None
+
+	switch loc {
+	case reflectwalk.Map:
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.MapValue:
+		w.key = w.key[:len(w.key)-1]
+		w.csKey = w.csKey[:len(w.csKey)-1]
+	case reflectwalk.Slice:
+		// Split any values that need to be split
+		w.splitSlice()
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.SliceElem:
+		w.csKey = w.csKey[:len(w.csKey)-1]
+	}
+
+	return nil
+}
+
+func (w *interpolationWalker) Map(m reflect.Value) error {
+	w.cs = append(w.cs, m)
+	return nil
+}
+
+func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
+	w.csData = k
+	w.csKey = append(w.csKey, k)
+	w.key = append(w.key, k.String())
+	w.lastValue = v
+	return nil
+}
+
+func (w *interpolationWalker) Slice(s reflect.Value) error {
+	w.cs = append(w.cs, s)
+	return nil
+}
+
+func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
+	w.csKey = append(w.csKey, reflect.ValueOf(i))
+	w.sliceIndex = i
+	return nil
+}
+
+func (w *interpolationWalker) Primitive(v reflect.Value) error {
+	setV := v
+
+	// We only care about strings
+	if v.Kind() == reflect.Interface {
+		setV = v
+		v = v.Elem()
+	}
+	if v.Kind() != reflect.String {
+		return nil
+	}
+
+	astRoot, err := Parse(v.String())
+	if err != nil {
+		return err
+	}
+
+	// If the AST we got is just a literal string value with the same
+	// value then we ignore it. We have to check if it's the same value
+	// because it is possible to input a string, get out a string, and
+	// have it be different. For example: "foo-$${bar}" turns into
+	// "foo-${bar}"
+	if n, ok := astRoot.(*ast.LiteralNode); ok {
+		if s, ok := n.Value.(string); ok && s == v.String() {
+			return nil
+		}
+	}
+
+	if w.F == nil {
+		return nil
+	}
+
+	data := WalkData{Root: astRoot, Location: w.loc}
+	if err := w.F(&data); err != nil {
+		return fmt.Errorf(
+			"%s in:\n\n%s",
+			err, v.String())
+	}
+
+	if data.Replace {
+		/*
+			if remove {
+				w.removeCurrent()
+				return nil
+			}
+		*/
+
+		resultVal := reflect.ValueOf(data.ReplaceValue)
+		switch w.loc {
+		case reflectwalk.MapKey:
+			m := w.cs[len(w.cs)-1]
+
+			// Delete the old value
+			var zero reflect.Value
+			m.SetMapIndex(w.csData.(reflect.Value), zero)
+
+			// Set the new key with the existing value
+			m.SetMapIndex(resultVal, w.lastValue)
+
+			// Set the key to be the new key
+			w.csData = resultVal
+		case reflectwalk.MapValue:
+			// If we're in a map, then the only way to set a map value is
+			// to set it directly.
+ m := w.cs[len(w.cs)-1] + mk := w.csData.(reflect.Value) + m.SetMapIndex(mk, resultVal) + default: + // Otherwise, we should be addressable + setV.Set(resultVal) + } + } + + return nil +} + +func (w *interpolationWalker) removeCurrent() { + // Append the key to the unknown keys + w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) + + for i := 1; i <= len(w.cs); i++ { + c := w.cs[len(w.cs)-i] + switch c.Kind() { + case reflect.Map: + // Zero value so that we delete the map key + var val reflect.Value + + // Get the key and delete it + k := w.csData.(reflect.Value) + c.SetMapIndex(k, val) + return + } + } + + panic("No container found for removeCurrent") +} + +func (w *interpolationWalker) replaceCurrent(v reflect.Value) { + c := w.cs[len(w.cs)-2] + switch c.Kind() { + case reflect.Map: + // Get the key and delete it + k := w.csKey[len(w.csKey)-1] + c.SetMapIndex(k, v) + } +} + +func (w *interpolationWalker) splitSlice() { + // Get the []interface{} slice so we can do some operations on + // it without dealing with reflection. We'll document each step + // here to be clear. + var s []interface{} + raw := w.cs[len(w.cs)-1] + switch v := raw.Interface().(type) { + case []interface{}: + s = v + case []map[string]interface{}: + return + default: + panic("Unknown kind: " + raw.Kind().String()) + } + + // Check if we have any elements that we need to split. If not, then + // just return since we're done. + split := false + if !split { + return + } + + // Make a new result slice that is twice the capacity to fit our growth. + result := make([]interface{}, 0, len(s)*2) + + // Go over each element of the original slice and start building up + // the resulting slice by splitting where we have to. + for _, v := range s { + sv, ok := v.(string) + if !ok { + // Not a string, so just set it + result = append(result, v) + continue + } + + // Not a string list, so just set it + result = append(result, sv) + } + + // Our slice is now done, we have to replace the slice now + // with this new one that we have. + w.replaceCurrent(reflect.ValueOf(result)) +} diff --git a/vendor/github.com/hashicorp/terraform/LICENSE b/vendor/github.com/hashicorp/terraform/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. 
“Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go new file mode 100644 index 00000000..5f4e89ee --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/append.go @@ -0,0 +1,86 @@ +package config + +// Append appends one configuration to another. +// +// Append assumes that both configurations will not have +// conflicting variables, resources, etc. If they do, the +// problems will be caught in the validation phase. +// +// It is possible that c1, c2 on their own are not valid. For +// example, a resource in c2 may reference a variable in c1. But +// together, they would be valid. +func Append(c1, c2 *Config) (*Config, error) { + c := new(Config) + + // Append unknown keys, but keep them unique since it is a set + unknowns := make(map[string]struct{}) + for _, k := range c1.unknownKeys { + _, present := unknowns[k] + if !present { + unknowns[k] = struct{}{} + c.unknownKeys = append(c.unknownKeys, k) + } + } + + for _, k := range c2.unknownKeys { + _, present := unknowns[k] + if !present { + unknowns[k] = struct{}{} + c.unknownKeys = append(c.unknownKeys, k) + } + } + + c.Atlas = c1.Atlas + if c2.Atlas != nil { + c.Atlas = c2.Atlas + } + + // merge Terraform blocks + if c1.Terraform != nil { + c.Terraform = c1.Terraform + if c2.Terraform != nil { + c.Terraform.Merge(c2.Terraform) + } + } else { + c.Terraform = c2.Terraform + } + + if len(c1.Modules) > 0 || len(c2.Modules) > 0 { + c.Modules = make( + []*Module, 0, len(c1.Modules)+len(c2.Modules)) + c.Modules = append(c.Modules, c1.Modules...) + c.Modules = append(c.Modules, c2.Modules...) + } + + if len(c1.Outputs) > 0 || len(c2.Outputs) > 0 { + c.Outputs = make( + []*Output, 0, len(c1.Outputs)+len(c2.Outputs)) + c.Outputs = append(c.Outputs, c1.Outputs...) + c.Outputs = append(c.Outputs, c2.Outputs...) + } + + if len(c1.ProviderConfigs) > 0 || len(c2.ProviderConfigs) > 0 { + c.ProviderConfigs = make( + []*ProviderConfig, + 0, len(c1.ProviderConfigs)+len(c2.ProviderConfigs)) + c.ProviderConfigs = append(c.ProviderConfigs, c1.ProviderConfigs...) + c.ProviderConfigs = append(c.ProviderConfigs, c2.ProviderConfigs...) + } + + if len(c1.Resources) > 0 || len(c2.Resources) > 0 { + c.Resources = make( + []*Resource, + 0, len(c1.Resources)+len(c2.Resources)) + c.Resources = append(c.Resources, c1.Resources...) + c.Resources = append(c.Resources, c2.Resources...) + } + + if len(c1.Variables) > 0 || len(c2.Variables) > 0 { + c.Variables = make( + []*Variable, 0, len(c1.Variables)+len(c2.Variables)) + c.Variables = append(c.Variables, c1.Variables...) + c.Variables = append(c.Variables, c2.Variables...) + } + + return c, nil +} diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go new file mode 100644 index 00000000..9a764ace --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/config.go @@ -0,0 +1,1096 @@ +// The config package is responsible for loading and validating the +// configuration. 
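+//
+// As a hedged sketch, two loaded configurations can be combined and then
+// checked using Append and Validate from this package:
+//
+//	merged, err := Append(c1, c2)
+//	if err == nil {
+//		err = merged.Validate()
+//	}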
+package config + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hil" + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/terraform/helper/hilmapstructure" + "github.com/mitchellh/reflectwalk" +) + +// NameRegexp is the regular expression that all names (modules, providers, +// resources, etc.) must follow. +var NameRegexp = regexp.MustCompile(`(?i)\A[A-Z0-9_][A-Z0-9\-\_]*\z`) + +// Config is the configuration that comes from loading a collection +// of Terraform templates. +type Config struct { + // Dir is the path to the directory where this configuration was + // loaded from. If it is blank, this configuration wasn't loaded from + // any meaningful directory. + Dir string + + Terraform *Terraform + Atlas *AtlasConfig + Modules []*Module + ProviderConfigs []*ProviderConfig + Resources []*Resource + Variables []*Variable + Outputs []*Output + + // The fields below can be filled in by loaders for validation + // purposes. + unknownKeys []string +} + +// AtlasConfig is the configuration for building in HashiCorp's Atlas. +type AtlasConfig struct { + Name string + Include []string + Exclude []string +} + +// Module is a module used within a configuration. +// +// This does not represent a module itself, this represents a module +// call-site within an existing configuration. +type Module struct { + Name string + Source string + RawConfig *RawConfig +} + +// ProviderConfig is the configuration for a resource provider. +// +// For example, Terraform needs to set the AWS access keys for the AWS +// resource provider. +type ProviderConfig struct { + Name string + Alias string + RawConfig *RawConfig +} + +// A resource represents a single Terraform resource in the configuration. +// A Terraform resource is something that supports some or all of the +// usual "create, read, update, delete" operations, depending on +// the given Mode. +type Resource struct { + Mode ResourceMode // which operations the resource supports + Name string + Type string + RawCount *RawConfig + RawConfig *RawConfig + Provisioners []*Provisioner + Provider string + DependsOn []string + Lifecycle ResourceLifecycle +} + +// Copy returns a copy of this Resource. Helpful for avoiding shared +// config pointers across multiple pieces of the graph that need to do +// interpolation. 
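+//
+// For example (sketch):
+//
+//	r2 := r.Copy()
+//	// r2.RawConfig and r2.Provisioners may now be mutated without
+//	// affecting r.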
+func (r *Resource) Copy() *Resource { + n := &Resource{ + Mode: r.Mode, + Name: r.Name, + Type: r.Type, + RawCount: r.RawCount.Copy(), + RawConfig: r.RawConfig.Copy(), + Provisioners: make([]*Provisioner, 0, len(r.Provisioners)), + Provider: r.Provider, + DependsOn: make([]string, len(r.DependsOn)), + Lifecycle: *r.Lifecycle.Copy(), + } + for _, p := range r.Provisioners { + n.Provisioners = append(n.Provisioners, p.Copy()) + } + copy(n.DependsOn, r.DependsOn) + return n +} + +// ResourceLifecycle is used to store the lifecycle tuning parameters +// to allow customized behavior +type ResourceLifecycle struct { + CreateBeforeDestroy bool `mapstructure:"create_before_destroy"` + PreventDestroy bool `mapstructure:"prevent_destroy"` + IgnoreChanges []string `mapstructure:"ignore_changes"` +} + +// Copy returns a copy of this ResourceLifecycle +func (r *ResourceLifecycle) Copy() *ResourceLifecycle { + n := &ResourceLifecycle{ + CreateBeforeDestroy: r.CreateBeforeDestroy, + PreventDestroy: r.PreventDestroy, + IgnoreChanges: make([]string, len(r.IgnoreChanges)), + } + copy(n.IgnoreChanges, r.IgnoreChanges) + return n +} + +// Provisioner is a configured provisioner step on a resource. +type Provisioner struct { + Type string + RawConfig *RawConfig + ConnInfo *RawConfig + + When ProvisionerWhen + OnFailure ProvisionerOnFailure +} + +// Copy returns a copy of this Provisioner +func (p *Provisioner) Copy() *Provisioner { + return &Provisioner{ + Type: p.Type, + RawConfig: p.RawConfig.Copy(), + ConnInfo: p.ConnInfo.Copy(), + When: p.When, + OnFailure: p.OnFailure, + } +} + +// Variable is a variable defined within the configuration. +type Variable struct { + Name string + DeclaredType string `mapstructure:"type"` + Default interface{} + Description string +} + +// Output is an output defined within the configuration. An output is +// resulting data that is highlighted by Terraform when finished. An +// output marked Sensitive will be output in a masked form following +// application, but will still be available in state. +type Output struct { + Name string + DependsOn []string + Description string + Sensitive bool + RawConfig *RawConfig +} + +// VariableType is the type of value a variable is holding, and returned +// by the Type() function on variables. +type VariableType byte + +const ( + VariableTypeUnknown VariableType = iota + VariableTypeString + VariableTypeList + VariableTypeMap +) + +func (v VariableType) Printable() string { + switch v { + case VariableTypeString: + return "string" + case VariableTypeMap: + return "map" + case VariableTypeList: + return "list" + default: + return "unknown" + } +} + +// ProviderConfigName returns the name of the provider configuration in +// the given mapping that maps to the proper provider configuration +// for this resource. +func ProviderConfigName(t string, pcs []*ProviderConfig) string { + lk := "" + for _, v := range pcs { + k := v.Name + if strings.HasPrefix(t, k) && len(k) > len(lk) { + lk = k + } + } + + return lk +} + +// A unique identifier for this module. +func (r *Module) Id() string { + return fmt.Sprintf("%s", r.Name) +} + +// Count returns the count of this resource. +func (r *Resource) Count() (int, error) { + raw := r.RawCount.Value() + count, ok := r.RawCount.Value().(string) + if !ok { + return 0, fmt.Errorf( + "expected count to be a string or int, got %T", raw) + } + + v, err := strconv.ParseInt(count, 0, 0) + if err != nil { + return 0, err + } + + return int(v), nil +} + +// A unique identifier for this resource. 
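+// For example, a managed resource is identified as "aws_instance.foo",
+// while the equivalent data resource would be "data.aws_instance.foo".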
+func (r *Resource) Id() string {
+	switch r.Mode {
+	case ManagedResourceMode:
+		return fmt.Sprintf("%s.%s", r.Type, r.Name)
+	case DataResourceMode:
+		return fmt.Sprintf("data.%s.%s", r.Type, r.Name)
+	default:
+		panic(fmt.Errorf("unknown resource mode %s", r.Mode))
+	}
+}
+
+// Validate does some basic semantic checking of the configuration.
+func (c *Config) Validate() error {
+	if c == nil {
+		return nil
+	}
+
+	var errs []error
+
+	for _, k := range c.unknownKeys {
+		errs = append(errs, fmt.Errorf(
+			"Unknown root level key: %s", k))
+	}
+
+	// Validate the Terraform config
+	if tf := c.Terraform; tf != nil {
+		errs = append(errs, c.Terraform.Validate()...)
+	}
+
+	vars := c.InterpolatedVariables()
+	varMap := make(map[string]*Variable)
+	for _, v := range c.Variables {
+		if _, ok := varMap[v.Name]; ok {
+			errs = append(errs, fmt.Errorf(
+				"Variable '%s': duplicate found. Variable names must be unique.",
+				v.Name))
+		}
+
+		varMap[v.Name] = v
+	}
+
+	for k, _ := range varMap {
+		if !NameRegexp.MatchString(k) {
+			errs = append(errs, fmt.Errorf(
+				"variable %q: variable name must match regular expression %s",
+				k, NameRegexp))
+		}
+	}
+
+	for _, v := range c.Variables {
+		if v.Type() == VariableTypeUnknown {
+			errs = append(errs, fmt.Errorf(
+				"Variable '%s': must be a string or a map",
+				v.Name))
+			continue
+		}
+
+		interp := false
+		fn := func(n ast.Node) (interface{}, error) {
+			// LiteralNode is a literal string (outside of a ${ ... } sequence).
+			// interpolationWalker skips most of these, but in particular it
+			// visits those that have escaped sequences (like $${foo}) as a
+			// signal that *some* processing is required on this string. For
+			// our purposes here though, this is fine and not an interpolation.
+			if _, ok := n.(*ast.LiteralNode); !ok {
+				interp = true
+			}
+			return "", nil
+		}
+
+		w := &interpolationWalker{F: fn}
+		if v.Default != nil {
+			if err := reflectwalk.Walk(v.Default, w); err == nil {
+				if interp {
+					errs = append(errs, fmt.Errorf(
+						"Variable '%s': cannot contain interpolations",
+						v.Name))
+				}
+			}
+		}
+	}
+
+	// Check for references to user variables that do not actually
+	// exist and record those errors.
+	for source, vs := range vars {
+		for _, v := range vs {
+			uv, ok := v.(*UserVariable)
+			if !ok {
+				continue
+			}
+
+			if _, ok := varMap[uv.Name]; !ok {
+				errs = append(errs, fmt.Errorf(
+					"%s: unknown variable referenced: '%s'. define it with 'variable' blocks",
+					source,
+					uv.Name))
+			}
+		}
+	}
+
+	// Check that all count variables are valid.
+	for source, vs := range vars {
+		for _, rawV := range vs {
+			switch v := rawV.(type) {
+			case *CountVariable:
+				if v.Type == CountValueInvalid {
+					errs = append(errs, fmt.Errorf(
+						"%s: invalid count variable: %s",
+						source,
+						v.FullKey()))
+				}
+			case *PathVariable:
+				if v.Type == PathValueInvalid {
+					errs = append(errs, fmt.Errorf(
+						"%s: invalid path variable: %s",
+						source,
+						v.FullKey()))
+				}
+			}
+		}
+	}
+
+	// Check that providers aren't declared multiple times.
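+	// (The set below is keyed by FullName, i.e. "name" or "name.alias",
+	// so the same provider may still be configured more than once under
+	// distinct aliases; see ProviderConfig.FullName further below.)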
+ providerSet := make(map[string]struct{}) + for _, p := range c.ProviderConfigs { + name := p.FullName() + if _, ok := providerSet[name]; ok { + errs = append(errs, fmt.Errorf( + "provider.%s: declared multiple times, you can only declare a provider once", + name)) + continue + } + + providerSet[name] = struct{}{} + } + + // Check that all references to modules are valid + modules := make(map[string]*Module) + dupped := make(map[string]struct{}) + for _, m := range c.Modules { + // Check for duplicates + if _, ok := modules[m.Id()]; ok { + if _, ok := dupped[m.Id()]; !ok { + dupped[m.Id()] = struct{}{} + + errs = append(errs, fmt.Errorf( + "%s: module repeated multiple times", + m.Id())) + } + + // Already seen this module, just skip it + continue + } + + modules[m.Id()] = m + + // Check that the source has no interpolations + rc, err := NewRawConfig(map[string]interface{}{ + "root": m.Source, + }) + if err != nil { + errs = append(errs, fmt.Errorf( + "%s: module source error: %s", + m.Id(), err)) + } else if len(rc.Interpolations) > 0 { + errs = append(errs, fmt.Errorf( + "%s: module source cannot contain interpolations", + m.Id())) + } + + // Check that the name matches our regexp + if !NameRegexp.Match([]byte(m.Name)) { + errs = append(errs, fmt.Errorf( + "%s: module name can only contain letters, numbers, "+ + "dashes, and underscores", + m.Id())) + } + + // Check that the configuration can all be strings, lists or maps + raw := make(map[string]interface{}) + for k, v := range m.RawConfig.Raw { + var strVal string + if err := hilmapstructure.WeakDecode(v, &strVal); err == nil { + raw[k] = strVal + continue + } + + var mapVal map[string]interface{} + if err := hilmapstructure.WeakDecode(v, &mapVal); err == nil { + raw[k] = mapVal + continue + } + + var sliceVal []interface{} + if err := hilmapstructure.WeakDecode(v, &sliceVal); err == nil { + raw[k] = sliceVal + continue + } + + errs = append(errs, fmt.Errorf( + "%s: variable %s must be a string, list or map value", + m.Id(), k)) + } + + // Check for invalid count variables + for _, v := range m.RawConfig.Variables { + switch v.(type) { + case *CountVariable: + errs = append(errs, fmt.Errorf( + "%s: count variables are only valid within resources", m.Name)) + case *SelfVariable: + errs = append(errs, fmt.Errorf( + "%s: self variables are only valid within resources", m.Name)) + } + } + + // Update the raw configuration to only contain the string values + m.RawConfig, err = NewRawConfig(raw) + if err != nil { + errs = append(errs, fmt.Errorf( + "%s: can't initialize configuration: %s", + m.Id(), err)) + } + } + dupped = nil + + // Check that all variables for modules reference modules that + // exist. 
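+	// (For example, "${module.foo.bar}" is valid only when a module named
+	// "foo" is declared; the modules map above is keyed by Module.Id(),
+	// which is simply the module name.)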
+	for source, vs := range vars {
+		for _, v := range vs {
+			mv, ok := v.(*ModuleVariable)
+			if !ok {
+				continue
+			}
+
+			if _, ok := modules[mv.Name]; !ok {
+				errs = append(errs, fmt.Errorf(
+					"%s: unknown module referenced: %s",
+					source,
+					mv.Name))
+			}
+		}
+	}
+
+	// Check that all references to resources are valid
+	resources := make(map[string]*Resource)
+	dupped = make(map[string]struct{})
+	for _, r := range c.Resources {
+		if _, ok := resources[r.Id()]; ok {
+			if _, ok := dupped[r.Id()]; !ok {
+				dupped[r.Id()] = struct{}{}
+
+				errs = append(errs, fmt.Errorf(
+					"%s: resource repeated multiple times",
+					r.Id()))
+			}
+		}
+
+		resources[r.Id()] = r
+	}
+	dupped = nil
+
+	// Validate resources
+	for n, r := range resources {
+		// Verify count variables
+		for _, v := range r.RawCount.Variables {
+			switch v.(type) {
+			case *CountVariable:
+				errs = append(errs, fmt.Errorf(
+					"%s: resource count can't reference count variable: %s",
+					n,
+					v.FullKey()))
+			case *SimpleVariable:
+				errs = append(errs, fmt.Errorf(
+					"%s: resource count can't reference variable: %s",
+					n,
+					v.FullKey()))
+
+			// Good
+			case *ModuleVariable:
+			case *ResourceVariable:
+			case *TerraformVariable:
+			case *UserVariable:
+
+			default:
+				errs = append(errs, fmt.Errorf(
+					"Internal error. Unknown type in count var in %s: %T",
+					n, v))
+			}
+		}
+
+		// Interpolate with a fixed number to verify that it's a number.
+		r.RawCount.interpolate(func(root ast.Node) (interface{}, error) {
+			// Execute the node but transform the AST so that it returns
+			// a fixed value of "5" for all interpolations.
+			result, err := hil.Eval(
+				hil.FixedValueTransform(
+					root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}),
+				nil)
+			if err != nil {
+				return "", err
+			}
+
+			return result.Value, nil
+		})
+		_, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0)
+		if err != nil {
+			errs = append(errs, fmt.Errorf(
+				"%s: resource count must be an integer",
+				n))
+		}
+		r.RawCount.init()
+
+		// Validate DependsOn
+		errs = append(errs, c.validateDependsOn(n, r.DependsOn, resources, modules)...)
+
+		// Verify provisioners
+		for _, p := range r.Provisioners {
+			// This validation checks that there are no splat variables
+			// referencing the resource itself. This currently is not allowed.
+
+			for _, v := range p.ConnInfo.Variables {
+				rv, ok := v.(*ResourceVariable)
+				if !ok {
+					continue
+				}
+
+				if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
+					errs = append(errs, fmt.Errorf(
+						"%s: connection info cannot contain splat variable "+
+							"referencing itself", n))
+					break
+				}
+			}
+
+			for _, v := range p.RawConfig.Variables {
+				rv, ok := v.(*ResourceVariable)
+				if !ok {
+					continue
+				}
+
+				if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
+					errs = append(errs, fmt.Errorf(
+						"%s: connection info cannot contain splat variable "+
+							"referencing itself", n))
+					break
+				}
+			}
+
+			// Check for invalid when/onFailure values; though this should be
+			// picked up by the loader, we check here just in case.
+ if p.When == ProvisionerWhenInvalid { + errs = append(errs, fmt.Errorf( + "%s: provisioner 'when' value is invalid", n)) + } + if p.OnFailure == ProvisionerOnFailureInvalid { + errs = append(errs, fmt.Errorf( + "%s: provisioner 'on_failure' value is invalid", n)) + } + } + + // Verify ignore_changes contains valid entries + for _, v := range r.Lifecycle.IgnoreChanges { + if strings.Contains(v, "*") && v != "*" { + errs = append(errs, fmt.Errorf( + "%s: ignore_changes does not support using a partial string "+ + "together with a wildcard: %s", n, v)) + } + } + + // Verify ignore_changes has no interpolations + rc, err := NewRawConfig(map[string]interface{}{ + "root": r.Lifecycle.IgnoreChanges, + }) + if err != nil { + errs = append(errs, fmt.Errorf( + "%s: lifecycle ignore_changes error: %s", + n, err)) + } else if len(rc.Interpolations) > 0 { + errs = append(errs, fmt.Errorf( + "%s: lifecycle ignore_changes cannot contain interpolations", + n)) + } + + // If it is a data source then it can't have provisioners + if r.Mode == DataResourceMode { + if _, ok := r.RawConfig.Raw["provisioner"]; ok { + errs = append(errs, fmt.Errorf( + "%s: data sources cannot have provisioners", + n)) + } + } + } + + for source, vs := range vars { + for _, v := range vs { + rv, ok := v.(*ResourceVariable) + if !ok { + continue + } + + id := rv.ResourceId() + if _, ok := resources[id]; !ok { + errs = append(errs, fmt.Errorf( + "%s: unknown resource '%s' referenced in variable %s", + source, + id, + rv.FullKey())) + continue + } + } + } + + // Check that all outputs are valid + { + found := make(map[string]struct{}) + for _, o := range c.Outputs { + // Verify the output is new + if _, ok := found[o.Name]; ok { + errs = append(errs, fmt.Errorf( + "%s: duplicate output. output names must be unique.", + o.Name)) + continue + } + found[o.Name] = struct{}{} + + var invalidKeys []string + valueKeyFound := false + for k := range o.RawConfig.Raw { + if k == "value" { + valueKeyFound = true + continue + } + if k == "sensitive" { + if sensitive, ok := o.RawConfig.config[k].(bool); ok { + if sensitive { + o.Sensitive = true + } + continue + } + + errs = append(errs, fmt.Errorf( + "%s: value for 'sensitive' must be boolean", + o.Name)) + continue + } + if k == "description" { + if desc, ok := o.RawConfig.config[k].(string); ok { + o.Description = desc + continue + } + + errs = append(errs, fmt.Errorf( + "%s: value for 'description' must be string", + o.Name)) + continue + } + invalidKeys = append(invalidKeys, k) + } + if len(invalidKeys) > 0 { + errs = append(errs, fmt.Errorf( + "%s: output has invalid keys: %s", + o.Name, strings.Join(invalidKeys, ", "))) + } + if !valueKeyFound { + errs = append(errs, fmt.Errorf( + "%s: output is missing required 'value' key", o.Name)) + } + + for _, v := range o.RawConfig.Variables { + if _, ok := v.(*CountVariable); ok { + errs = append(errs, fmt.Errorf( + "%s: count variables are only valid within resources", o.Name)) + } + } + } + } + + // Check that all variables are in the proper context + for source, rc := range c.rawConfigs() { + walker := &interpolationWalker{ + ContextF: c.validateVarContextFn(source, &errs), + } + if err := reflectwalk.Walk(rc.Raw, walker); err != nil { + errs = append(errs, fmt.Errorf( + "%s: error reading config: %s", source, err)) + } + } + + // Validate the self variable + for source, rc := range c.rawConfigs() { + // Ignore provisioners. This is a pretty brittle way to do this, + // but better than also repeating all the resources. 
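+		// (The source keys built by rawConfigs below embed the word
+		// "provisioner" for provisioner configs, which is what this
+		// substring match relies on.)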
+		if strings.Contains(source, "provision") {
+			continue
+		}
+
+		for _, v := range rc.Variables {
+			if _, ok := v.(*SelfVariable); ok {
+				errs = append(errs, fmt.Errorf(
+					"%s: cannot contain self-reference %s", source, v.FullKey()))
+			}
+		}
+	}
+
+	if len(errs) > 0 {
+		return &multierror.Error{Errors: errs}
+	}
+
+	return nil
+}
+
+// InterpolatedVariables is a helper that returns a mapping of all the interpolated
+// variables within the configuration. This is used to verify references
+// are valid in the Validate step.
+func (c *Config) InterpolatedVariables() map[string][]InterpolatedVariable {
+	result := make(map[string][]InterpolatedVariable)
+	for source, rc := range c.rawConfigs() {
+		for _, v := range rc.Variables {
+			result[source] = append(result[source], v)
+		}
+	}
+	return result
+}
+
+// rawConfigs returns all of the RawConfigs that are available keyed by
+// a human-friendly source.
+func (c *Config) rawConfigs() map[string]*RawConfig {
+	result := make(map[string]*RawConfig)
+	for _, m := range c.Modules {
+		source := fmt.Sprintf("module '%s'", m.Name)
+		result[source] = m.RawConfig
+	}
+
+	for _, pc := range c.ProviderConfigs {
+		source := fmt.Sprintf("provider config '%s'", pc.Name)
+		result[source] = pc.RawConfig
+	}
+
+	for _, rc := range c.Resources {
+		source := fmt.Sprintf("resource '%s'", rc.Id())
+		result[source+" count"] = rc.RawCount
+		result[source+" config"] = rc.RawConfig
+
+		for i, p := range rc.Provisioners {
+			subsource := fmt.Sprintf(
+				"%s provisioner %s (#%d)",
+				source, p.Type, i+1)
+			result[subsource] = p.RawConfig
+		}
+	}
+
+	for _, o := range c.Outputs {
+		source := fmt.Sprintf("output '%s'", o.Name)
+		result[source] = o.RawConfig
+	}
+
+	return result
+}
+
+func (c *Config) validateVarContextFn(
+	source string, errs *[]error) interpolationWalkerContextFunc {
+	return func(loc reflectwalk.Location, node ast.Node) {
+		// If we're in a slice element, then it's fine, since you can do
+		// anything in there.
+		if loc == reflectwalk.SliceElem {
+			return
+		}
+
+		// Otherwise, let's check if there is a splat resource variable
+		// at the top level in here. We do this by doing a transform that
+		// replaces everything with a noop node unless it's a variable
+		// access or concat. This should turn the AST into a flat tree
+		// of Concat(Noop, ...). If there are any variables left that are
+		// multi-access, then it's still broken.
+		node = node.Accept(func(n ast.Node) ast.Node {
+			// If it is a concat or variable access, we allow it.
+			switch n.(type) {
+			case *ast.Output:
+				return n
+			case *ast.VariableAccess:
+				return n
+			}
+
+			// Otherwise, noop
+			return &noopNode{}
+		})
+
+		vars, err := DetectVariables(node)
+		if err != nil {
+			// Ignore it since this will be caught during parse. This
+			// actually probably should never happen by the time this
+			// is called, but it's okay.
+ return + } + + for _, v := range vars { + rv, ok := v.(*ResourceVariable) + if !ok { + return + } + + if rv.Multi && rv.Index == -1 { + *errs = append(*errs, fmt.Errorf( + "%s: use of the splat ('*') operator must be wrapped in a list declaration", + source)) + } + } + } +} + +func (c *Config) validateDependsOn( + n string, + v []string, + resources map[string]*Resource, + modules map[string]*Module) []error { + // Verify depends on points to resources that all exist + var errs []error + for _, d := range v { + // Check if we contain interpolations + rc, err := NewRawConfig(map[string]interface{}{ + "value": d, + }) + if err == nil && len(rc.Variables) > 0 { + errs = append(errs, fmt.Errorf( + "%s: depends on value cannot contain interpolations: %s", + n, d)) + continue + } + + // If it is a module, verify it is a module + if strings.HasPrefix(d, "module.") { + name := d[len("module."):] + if _, ok := modules[name]; !ok { + errs = append(errs, fmt.Errorf( + "%s: resource depends on non-existent module '%s'", + n, name)) + } + + continue + } + + // Check resources + if _, ok := resources[d]; !ok { + errs = append(errs, fmt.Errorf( + "%s: resource depends on non-existent resource '%s'", + n, d)) + } + } + + return errs +} + +func (m *Module) mergerName() string { + return m.Id() +} + +func (m *Module) mergerMerge(other merger) merger { + m2 := other.(*Module) + + result := *m + result.Name = m2.Name + result.RawConfig = result.RawConfig.merge(m2.RawConfig) + + if m2.Source != "" { + result.Source = m2.Source + } + + return &result +} + +func (o *Output) mergerName() string { + return o.Name +} + +func (o *Output) mergerMerge(m merger) merger { + o2 := m.(*Output) + + result := *o + result.Name = o2.Name + result.Description = o2.Description + result.RawConfig = result.RawConfig.merge(o2.RawConfig) + result.Sensitive = o2.Sensitive + result.DependsOn = o2.DependsOn + + return &result +} + +func (c *ProviderConfig) GoString() string { + return fmt.Sprintf("*%#v", *c) +} + +func (c *ProviderConfig) FullName() string { + if c.Alias == "" { + return c.Name + } + + return fmt.Sprintf("%s.%s", c.Name, c.Alias) +} + +func (c *ProviderConfig) mergerName() string { + return c.Name +} + +func (c *ProviderConfig) mergerMerge(m merger) merger { + c2 := m.(*ProviderConfig) + + result := *c + result.Name = c2.Name + result.RawConfig = result.RawConfig.merge(c2.RawConfig) + + if c2.Alias != "" { + result.Alias = c2.Alias + } + + return &result +} + +func (r *Resource) mergerName() string { + return r.Id() +} + +func (r *Resource) mergerMerge(m merger) merger { + r2 := m.(*Resource) + + result := *r + result.Mode = r2.Mode + result.Name = r2.Name + result.Type = r2.Type + result.RawConfig = result.RawConfig.merge(r2.RawConfig) + + if r2.RawCount.Value() != "1" { + result.RawCount = r2.RawCount + } + + if len(r2.Provisioners) > 0 { + result.Provisioners = r2.Provisioners + } + + return &result +} + +// Merge merges two variables to create a new third variable. +func (v *Variable) Merge(v2 *Variable) *Variable { + // Shallow copy the variable + result := *v + + // The names should be the same, but the second name always wins. 
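+	// (For example, if v2 sets only Default, the merged result keeps v's
+	// Description but takes v2's Default.)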
+ result.Name = v2.Name + + if v2.DeclaredType != "" { + result.DeclaredType = v2.DeclaredType + } + if v2.Default != nil { + result.Default = v2.Default + } + if v2.Description != "" { + result.Description = v2.Description + } + + return &result +} + +var typeStringMap = map[string]VariableType{ + "string": VariableTypeString, + "map": VariableTypeMap, + "list": VariableTypeList, +} + +// Type returns the type of variable this is. +func (v *Variable) Type() VariableType { + if v.DeclaredType != "" { + declaredType, ok := typeStringMap[v.DeclaredType] + if !ok { + return VariableTypeUnknown + } + + return declaredType + } + + return v.inferTypeFromDefault() +} + +// ValidateTypeAndDefault ensures that default variable value is compatible +// with the declared type (if one exists), and that the type is one which is +// known to Terraform +func (v *Variable) ValidateTypeAndDefault() error { + // If an explicit type is declared, ensure it is valid + if v.DeclaredType != "" { + if _, ok := typeStringMap[v.DeclaredType]; !ok { + validTypes := []string{} + for k := range typeStringMap { + validTypes = append(validTypes, k) + } + return fmt.Errorf( + "Variable '%s' type must be one of [%s] - '%s' is not a valid type", + v.Name, + strings.Join(validTypes, ", "), + v.DeclaredType, + ) + } + } + + if v.DeclaredType == "" || v.Default == nil { + return nil + } + + if v.inferTypeFromDefault() != v.Type() { + return fmt.Errorf("'%s' has a default value which is not of type '%s' (got '%s')", + v.Name, v.DeclaredType, v.inferTypeFromDefault().Printable()) + } + + return nil +} + +func (v *Variable) mergerName() string { + return v.Name +} + +func (v *Variable) mergerMerge(m merger) merger { + return v.Merge(m.(*Variable)) +} + +// Required tests whether a variable is required or not. +func (v *Variable) Required() bool { + return v.Default == nil +} + +// inferTypeFromDefault contains the logic for the old method of inferring +// variable types - we can also use this for validating that the declared +// type matches the type of the default value +func (v *Variable) inferTypeFromDefault() VariableType { + if v.Default == nil { + return VariableTypeString + } + + var s string + if err := hilmapstructure.WeakDecode(v.Default, &s); err == nil { + v.Default = s + return VariableTypeString + } + + var m map[string]interface{} + if err := hilmapstructure.WeakDecode(v.Default, &m); err == nil { + v.Default = m + return VariableTypeMap + } + + var l []interface{} + if err := hilmapstructure.WeakDecode(v.Default, &l); err == nil { + v.Default = l + return VariableTypeList + } + + return VariableTypeUnknown +} + +func (m ResourceMode) Taintable() bool { + switch m { + case ManagedResourceMode: + return true + case DataResourceMode: + return false + default: + panic(fmt.Errorf("unsupported ResourceMode value %s", m)) + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go new file mode 100644 index 00000000..0b3abbcd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/config_string.go @@ -0,0 +1,338 @@ +package config + +import ( + "bytes" + "fmt" + "sort" + "strings" +) + +// TestString is a Stringer-like function that outputs a string that can +// be used to easily compare multiple Config structures in unit tests. +// +// This function has no practical use outside of unit tests and debugging. 
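+//
+// A typical (sketched) use in a test:
+//
+//	if actual := c.TestString(); actual != expected {
+//		t.Fatalf("bad:\n%s", actual)
+//	}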
+func (c *Config) TestString() string { + if c == nil { + return "" + } + + var buf bytes.Buffer + if len(c.Modules) > 0 { + buf.WriteString("Modules:\n\n") + buf.WriteString(modulesStr(c.Modules)) + buf.WriteString("\n\n") + } + + if len(c.Variables) > 0 { + buf.WriteString("Variables:\n\n") + buf.WriteString(variablesStr(c.Variables)) + buf.WriteString("\n\n") + } + + if len(c.ProviderConfigs) > 0 { + buf.WriteString("Provider Configs:\n\n") + buf.WriteString(providerConfigsStr(c.ProviderConfigs)) + buf.WriteString("\n\n") + } + + if len(c.Resources) > 0 { + buf.WriteString("Resources:\n\n") + buf.WriteString(resourcesStr(c.Resources)) + buf.WriteString("\n\n") + } + + if len(c.Outputs) > 0 { + buf.WriteString("Outputs:\n\n") + buf.WriteString(outputsStr(c.Outputs)) + buf.WriteString("\n") + } + + return strings.TrimSpace(buf.String()) +} + +func terraformStr(t *Terraform) string { + result := "" + + if b := t.Backend; b != nil { + result += fmt.Sprintf("backend (%s)\n", b.Type) + + keys := make([]string, 0, len(b.RawConfig.Raw)) + for k, _ := range b.RawConfig.Raw { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + result += fmt.Sprintf(" %s\n", k) + } + } + + return strings.TrimSpace(result) +} + +func modulesStr(ms []*Module) string { + result := "" + order := make([]int, 0, len(ms)) + ks := make([]string, 0, len(ms)) + mapping := make(map[string]int) + for i, m := range ms { + k := m.Id() + ks = append(ks, k) + mapping[k] = i + } + sort.Strings(ks) + for _, k := range ks { + order = append(order, mapping[k]) + } + + for _, i := range order { + m := ms[i] + result += fmt.Sprintf("%s\n", m.Id()) + + ks := make([]string, 0, len(m.RawConfig.Raw)) + for k, _ := range m.RawConfig.Raw { + ks = append(ks, k) + } + sort.Strings(ks) + + result += fmt.Sprintf(" source = %s\n", m.Source) + + for _, k := range ks { + result += fmt.Sprintf(" %s\n", k) + } + } + + return strings.TrimSpace(result) +} + +func outputsStr(os []*Output) string { + ns := make([]string, 0, len(os)) + m := make(map[string]*Output) + for _, o := range os { + ns = append(ns, o.Name) + m[o.Name] = o + } + sort.Strings(ns) + + result := "" + for _, n := range ns { + o := m[n] + + result += fmt.Sprintf("%s\n", n) + + if len(o.DependsOn) > 0 { + result += fmt.Sprintf(" dependsOn\n") + for _, d := range o.DependsOn { + result += fmt.Sprintf(" %s\n", d) + } + } + + if len(o.RawConfig.Variables) > 0 { + result += fmt.Sprintf(" vars\n") + for _, rawV := range o.RawConfig.Variables { + kind := "unknown" + str := rawV.FullKey() + + switch rawV.(type) { + case *ResourceVariable: + kind = "resource" + case *UserVariable: + kind = "user" + } + + result += fmt.Sprintf(" %s: %s\n", kind, str) + } + } + } + + return strings.TrimSpace(result) +} + +// This helper turns a provider configs field into a deterministic +// string value for comparison in tests. 
+func providerConfigsStr(pcs []*ProviderConfig) string { + result := "" + + ns := make([]string, 0, len(pcs)) + m := make(map[string]*ProviderConfig) + for _, n := range pcs { + ns = append(ns, n.Name) + m[n.Name] = n + } + sort.Strings(ns) + + for _, n := range ns { + pc := m[n] + + result += fmt.Sprintf("%s\n", n) + + keys := make([]string, 0, len(pc.RawConfig.Raw)) + for k, _ := range pc.RawConfig.Raw { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + result += fmt.Sprintf(" %s\n", k) + } + + if len(pc.RawConfig.Variables) > 0 { + result += fmt.Sprintf(" vars\n") + for _, rawV := range pc.RawConfig.Variables { + kind := "unknown" + str := rawV.FullKey() + + switch rawV.(type) { + case *ResourceVariable: + kind = "resource" + case *UserVariable: + kind = "user" + } + + result += fmt.Sprintf(" %s: %s\n", kind, str) + } + } + } + + return strings.TrimSpace(result) +} + +// This helper turns a resources field into a deterministic +// string value for comparison in tests. +func resourcesStr(rs []*Resource) string { + result := "" + order := make([]int, 0, len(rs)) + ks := make([]string, 0, len(rs)) + mapping := make(map[string]int) + for i, r := range rs { + k := r.Id() + ks = append(ks, k) + mapping[k] = i + } + sort.Strings(ks) + for _, k := range ks { + order = append(order, mapping[k]) + } + + for _, i := range order { + r := rs[i] + result += fmt.Sprintf( + "%s (x%s)\n", + r.Id(), + r.RawCount.Value()) + + ks := make([]string, 0, len(r.RawConfig.Raw)) + for k, _ := range r.RawConfig.Raw { + ks = append(ks, k) + } + sort.Strings(ks) + + for _, k := range ks { + result += fmt.Sprintf(" %s\n", k) + } + + if len(r.Provisioners) > 0 { + result += fmt.Sprintf(" provisioners\n") + for _, p := range r.Provisioners { + when := "" + if p.When != ProvisionerWhenCreate { + when = fmt.Sprintf(" (%s)", p.When.String()) + } + + result += fmt.Sprintf(" %s%s\n", p.Type, when) + + if p.OnFailure != ProvisionerOnFailureFail { + result += fmt.Sprintf(" on_failure = %s\n", p.OnFailure.String()) + } + + ks := make([]string, 0, len(p.RawConfig.Raw)) + for k, _ := range p.RawConfig.Raw { + ks = append(ks, k) + } + sort.Strings(ks) + + for _, k := range ks { + result += fmt.Sprintf(" %s\n", k) + } + } + } + + if len(r.DependsOn) > 0 { + result += fmt.Sprintf(" dependsOn\n") + for _, d := range r.DependsOn { + result += fmt.Sprintf(" %s\n", d) + } + } + + if len(r.RawConfig.Variables) > 0 { + result += fmt.Sprintf(" vars\n") + + ks := make([]string, 0, len(r.RawConfig.Variables)) + for k, _ := range r.RawConfig.Variables { + ks = append(ks, k) + } + sort.Strings(ks) + + for _, k := range ks { + rawV := r.RawConfig.Variables[k] + kind := "unknown" + str := rawV.FullKey() + + switch rawV.(type) { + case *ResourceVariable: + kind = "resource" + case *UserVariable: + kind = "user" + } + + result += fmt.Sprintf(" %s: %s\n", kind, str) + } + } + } + + return strings.TrimSpace(result) +} + +// This helper turns a variables field into a deterministic +// string value for comparison in tests. 
+func variablesStr(vs []*Variable) string {
+	result := ""
+	ks := make([]string, 0, len(vs))
+	m := make(map[string]*Variable)
+	for _, v := range vs {
+		ks = append(ks, v.Name)
+		m[v.Name] = v
+	}
+	sort.Strings(ks)
+
+	for _, k := range ks {
+		v := m[k]
+
+		required := ""
+		if v.Required() {
+			required = " (required)"
+		}
+
+		declaredType := ""
+		if v.DeclaredType != "" {
+			declaredType = fmt.Sprintf(" (%s)", v.DeclaredType)
+		}
+
+		if v.Default == nil || v.Default == "" {
+			v.Default = "<>"
+		}
+		if v.Description == "" {
+			v.Description = "<>"
+		}
+
+		result += fmt.Sprintf(
+			"%s%s%s\n %v\n %s\n",
+			k,
+			required,
+			declaredType,
+			v.Default,
+			v.Description)
+	}
+
+	return strings.TrimSpace(result)
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/config_tree.go b/vendor/github.com/hashicorp/terraform/config/config_tree.go
new file mode 100644
index 00000000..08dc0fe9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config_tree.go
@@ -0,0 +1,43 @@
+package config
+
+// configTree represents a tree of configurations where the root is the
+// first file and its children are the configurations it has imported.
+type configTree struct {
+	Path     string
+	Config   *Config
+	Children []*configTree
+}
+
+// Flatten flattens the entire tree down to a single merged Config
+// structure.
+func (t *configTree) Flatten() (*Config, error) {
+	// No children is easy: we're already merged!
+	if len(t.Children) == 0 {
+		return t.Config, nil
+	}
+
+	// Depth-first, merge all the children first.
+	childConfigs := make([]*Config, len(t.Children))
+	for i, ct := range t.Children {
+		c, err := ct.Flatten()
+		if err != nil {
+			return nil, err
+		}
+
+		childConfigs[i] = c
+	}
+
+	// Merge all the children in order
+	config := childConfigs[0]
+	childConfigs = childConfigs[1:]
+	for _, config2 := range childConfigs {
+		var err error
+		config, err = Merge(config, config2)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Merge the final merged child config with our own
+	return Merge(config, t.Config)
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go
new file mode 100644
index 00000000..37ec11a1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/import_tree.go
@@ -0,0 +1,113 @@
+package config
+
+import (
+	"fmt"
+	"io"
+)
+
+// configurable is an interface that must be implemented by any configuration
+// format of Terraform in order to return a *Config.
+type configurable interface {
+	Config() (*Config, error)
+}
+
+// importTree is the result of the first-pass load of the configuration
+// files. It is a tree of raw configurables and then any children (their
+// imports).
+//
+// An importTree can be turned into a configTree.
+type importTree struct {
+	Path     string
+	Raw      configurable
+	Children []*importTree
+}
+
+// This is the function type that must be implemented by the configuration
+// file loader to turn a single file into a configurable and any additional
+// imports.
+type fileLoaderFunc func(path string) (configurable, []string, error)
+
+// loadTree takes a single file and loads the entire importTree for that
+// file. This function detects what kind of configuration file it is and
+// executes the proper fileLoaderFunc.
+func loadTree(root string) (*importTree, error) {
+	var f fileLoaderFunc
+	switch ext(root) {
+	case ".tf", ".tf.json":
+		f = loadFileHcl
+	default:
+	}
+
+	if f == nil {
+		return nil, fmt.Errorf(
+			"%s: unknown configuration format. 
Use '.tf' or '.tf.json' extension", + root) + } + + c, imps, err := f(root) + if err != nil { + return nil, err + } + + children := make([]*importTree, len(imps)) + for i, imp := range imps { + t, err := loadTree(imp) + if err != nil { + return nil, err + } + + children[i] = t + } + + return &importTree{ + Path: root, + Raw: c, + Children: children, + }, nil +} + +// Close releases any resources we might be holding open for the importTree. +// +// This can safely be called even while ConfigTree results are alive. The +// importTree is not bound to these. +func (t *importTree) Close() error { + if c, ok := t.Raw.(io.Closer); ok { + c.Close() + } + for _, ct := range t.Children { + ct.Close() + } + + return nil +} + +// ConfigTree traverses the importTree and turns each node into a *Config +// object, ultimately returning a *configTree. +func (t *importTree) ConfigTree() (*configTree, error) { + config, err := t.Raw.Config() + if err != nil { + return nil, fmt.Errorf( + "Error loading %s: %s", + t.Path, + err) + } + + // Build our result + result := &configTree{ + Path: t.Path, + Config: config, + } + + // Build the config trees for the children + result.Children = make([]*configTree, len(t.Children)) + for i, ct := range t.Children { + t, err := ct.ConfigTree() + if err != nil { + return nil, err + } + + result.Children[i] = t + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go new file mode 100644 index 00000000..bbb35554 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/interpolate.go @@ -0,0 +1,386 @@ +package config + +import ( + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/hil/ast" +) + +// An InterpolatedVariable is a variable reference within an interpolation. +// +// Implementations of this interface represents various sources where +// variables can come from: user variables, resources, etc. +type InterpolatedVariable interface { + FullKey() string +} + +// CountVariable is a variable for referencing information about +// the count. +type CountVariable struct { + Type CountValueType + key string +} + +// CountValueType is the type of the count variable that is referenced. +type CountValueType byte + +const ( + CountValueInvalid CountValueType = iota + CountValueIndex +) + +// A ModuleVariable is a variable that is referencing the output +// of a module, such as "${module.foo.bar}" +type ModuleVariable struct { + Name string + Field string + key string +} + +// A PathVariable is a variable that references path information about the +// module. +type PathVariable struct { + Type PathValueType + key string +} + +type PathValueType byte + +const ( + PathValueInvalid PathValueType = iota + PathValueCwd + PathValueModule + PathValueRoot +) + +// A ResourceVariable is a variable that is referencing the field +// of a resource, such as "${aws_instance.foo.ami}" +type ResourceVariable struct { + Mode ResourceMode + Type string // Resource type, i.e. 
"aws_instance" + Name string // Resource name + Field string // Resource field + + Multi bool // True if multi-variable: aws_instance.foo.*.id + Index int // Index for multi-variable: aws_instance.foo.1.id == 1 + + key string +} + +// SelfVariable is a variable that is referencing the same resource +// it is running on: "${self.address}" +type SelfVariable struct { + Field string + + key string +} + +// SimpleVariable is an unprefixed variable, which can show up when users have +// strings they are passing down to resources that use interpolation +// internally. The template_file resource is an example of this. +type SimpleVariable struct { + Key string +} + +// TerraformVariable is a "terraform."-prefixed variable used to access +// metadata about the Terraform run. +type TerraformVariable struct { + Field string + key string +} + +// A UserVariable is a variable that is referencing a user variable +// that is inputted from outside the configuration. This looks like +// "${var.foo}" +type UserVariable struct { + Name string + Elem string + + key string +} + +func NewInterpolatedVariable(v string) (InterpolatedVariable, error) { + if strings.HasPrefix(v, "count.") { + return NewCountVariable(v) + } else if strings.HasPrefix(v, "path.") { + return NewPathVariable(v) + } else if strings.HasPrefix(v, "self.") { + return NewSelfVariable(v) + } else if strings.HasPrefix(v, "terraform.") { + return NewTerraformVariable(v) + } else if strings.HasPrefix(v, "var.") { + return NewUserVariable(v) + } else if strings.HasPrefix(v, "module.") { + return NewModuleVariable(v) + } else if !strings.ContainsRune(v, '.') { + return NewSimpleVariable(v) + } else { + return NewResourceVariable(v) + } +} + +func NewCountVariable(key string) (*CountVariable, error) { + var fieldType CountValueType + parts := strings.SplitN(key, ".", 2) + switch parts[1] { + case "index": + fieldType = CountValueIndex + } + + return &CountVariable{ + Type: fieldType, + key: key, + }, nil +} + +func (c *CountVariable) FullKey() string { + return c.key +} + +func NewModuleVariable(key string) (*ModuleVariable, error) { + parts := strings.SplitN(key, ".", 3) + if len(parts) < 3 { + return nil, fmt.Errorf( + "%s: module variables must be three parts: module.name.attr", + key) + } + + return &ModuleVariable{ + Name: parts[1], + Field: parts[2], + key: key, + }, nil +} + +func (v *ModuleVariable) FullKey() string { + return v.key +} + +func (v *ModuleVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + +func NewPathVariable(key string) (*PathVariable, error) { + var fieldType PathValueType + parts := strings.SplitN(key, ".", 2) + switch parts[1] { + case "cwd": + fieldType = PathValueCwd + case "module": + fieldType = PathValueModule + case "root": + fieldType = PathValueRoot + } + + return &PathVariable{ + Type: fieldType, + key: key, + }, nil +} + +func (v *PathVariable) FullKey() string { + return v.key +} + +func NewResourceVariable(key string) (*ResourceVariable, error) { + var mode ResourceMode + var parts []string + if strings.HasPrefix(key, "data.") { + mode = DataResourceMode + parts = strings.SplitN(key, ".", 4) + if len(parts) < 4 { + return nil, fmt.Errorf( + "%s: data variables must be four parts: data.TYPE.NAME.ATTR", + key) + } + + // Don't actually need the "data." prefix for parsing, since it's + // always constant. 
+ parts = parts[1:] + } else { + mode = ManagedResourceMode + parts = strings.SplitN(key, ".", 3) + if len(parts) < 3 { + return nil, fmt.Errorf( + "%s: resource variables must be three parts: TYPE.NAME.ATTR", + key) + } + } + + field := parts[2] + multi := false + var index int + + if idx := strings.Index(field, "."); idx != -1 { + indexStr := field[:idx] + multi = indexStr == "*" + index = -1 + + if !multi { + indexInt, err := strconv.ParseInt(indexStr, 0, 0) + if err == nil { + multi = true + index = int(indexInt) + } + } + + if multi { + field = field[idx+1:] + } + } + + return &ResourceVariable{ + Mode: mode, + Type: parts[0], + Name: parts[1], + Field: field, + Multi: multi, + Index: index, + key: key, + }, nil +} + +func (v *ResourceVariable) ResourceId() string { + switch v.Mode { + case ManagedResourceMode: + return fmt.Sprintf("%s.%s", v.Type, v.Name) + case DataResourceMode: + return fmt.Sprintf("data.%s.%s", v.Type, v.Name) + default: + panic(fmt.Errorf("unknown resource mode %s", v.Mode)) + } +} + +func (v *ResourceVariable) FullKey() string { + return v.key +} + +func NewSelfVariable(key string) (*SelfVariable, error) { + field := key[len("self."):] + + return &SelfVariable{ + Field: field, + + key: key, + }, nil +} + +func (v *SelfVariable) FullKey() string { + return v.key +} + +func (v *SelfVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + +func NewSimpleVariable(key string) (*SimpleVariable, error) { + return &SimpleVariable{key}, nil +} + +func (v *SimpleVariable) FullKey() string { + return v.Key +} + +func (v *SimpleVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + +func NewTerraformVariable(key string) (*TerraformVariable, error) { + field := key[len("terraform."):] + return &TerraformVariable{ + Field: field, + key: key, + }, nil +} + +func (v *TerraformVariable) FullKey() string { + return v.key +} + +func (v *TerraformVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + +func NewUserVariable(key string) (*UserVariable, error) { + name := key[len("var."):] + elem := "" + if idx := strings.Index(name, "."); idx > -1 { + elem = name[idx+1:] + name = name[:idx] + } + + if len(elem) > 0 { + return nil, fmt.Errorf("Invalid dot index found: 'var.%s.%s'. Values in maps and lists can be referenced using square bracket indexing, like: 'var.mymap[\"key\"]' or 'var.mylist[1]'.", name, elem) + } + + return &UserVariable{ + key: key, + + Name: name, + Elem: elem, + }, nil +} + +func (v *UserVariable) FullKey() string { + return v.key +} + +func (v *UserVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + +// DetectVariables takes an AST root and returns all the interpolated +// variables that are detected in the AST tree. 
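+//
+// For example, the AST for "${var.region}-${aws_instance.web.id}" would
+// yield a *UserVariable and a *ResourceVariable (names are illustrative).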
+func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) { + var result []InterpolatedVariable + var resultErr error + + // Visitor callback + fn := func(n ast.Node) ast.Node { + if resultErr != nil { + return n + } + + switch vn := n.(type) { + case *ast.VariableAccess: + v, err := NewInterpolatedVariable(vn.Name) + if err != nil { + resultErr = err + return n + } + result = append(result, v) + case *ast.Index: + if va, ok := vn.Target.(*ast.VariableAccess); ok { + v, err := NewInterpolatedVariable(va.Name) + if err != nil { + resultErr = err + return n + } + result = append(result, v) + } + if va, ok := vn.Key.(*ast.VariableAccess); ok { + v, err := NewInterpolatedVariable(va.Name) + if err != nil { + resultErr = err + return n + } + result = append(result, v) + } + default: + return n + } + + return n + } + + // Visitor pattern + root.Accept(fn) + + if resultErr != nil { + return nil, resultErr + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go new file mode 100644 index 00000000..f1f97b04 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go @@ -0,0 +1,1390 @@ +package config + +import ( + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "math" + "net" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/apparentlymart/go-cidr/cidr" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/hil" + "github.com/hashicorp/hil/ast" + "github.com/mitchellh/go-homedir" +) + +// stringSliceToVariableValue converts a string slice into the value +// required to be returned from interpolation functions which return +// TypeList. +func stringSliceToVariableValue(values []string) []ast.Variable { + output := make([]ast.Variable, len(values)) + for index, value := range values { + output[index] = ast.Variable{ + Type: ast.TypeString, + Value: value, + } + } + return output +} + +func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) { + output := make([]string, len(values)) + for index, value := range values { + if value.Type != ast.TypeString { + return []string{}, fmt.Errorf("list has non-string element (%T)", value.Type.String()) + } + output[index] = value.Value.(string) + } + return output, nil +} + +// Funcs is the mapping of built-in functions for configuration. 
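+//
+// A hedged usage sketch (ast.Function and its exported Callback field come
+// from github.com/hashicorp/hil/ast; calling Callback directly bypasses
+// hil's type checking):
+//
+//	fn := Funcs()["upper"]
+//	out, _ := fn.Callback([]interface{}{"terraform"}) // "TERRAFORM"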
+func Funcs() map[string]ast.Function { + return map[string]ast.Function{ + "basename": interpolationFuncBasename(), + "base64decode": interpolationFuncBase64Decode(), + "base64encode": interpolationFuncBase64Encode(), + "base64sha256": interpolationFuncBase64Sha256(), + "base64sha512": interpolationFuncBase64Sha512(), + "ceil": interpolationFuncCeil(), + "chomp": interpolationFuncChomp(), + "cidrhost": interpolationFuncCidrHost(), + "cidrnetmask": interpolationFuncCidrNetmask(), + "cidrsubnet": interpolationFuncCidrSubnet(), + "coalesce": interpolationFuncCoalesce(), + "coalescelist": interpolationFuncCoalesceList(), + "compact": interpolationFuncCompact(), + "concat": interpolationFuncConcat(), + "dirname": interpolationFuncDirname(), + "distinct": interpolationFuncDistinct(), + "element": interpolationFuncElement(), + "file": interpolationFuncFile(), + "matchkeys": interpolationFuncMatchKeys(), + "floor": interpolationFuncFloor(), + "format": interpolationFuncFormat(), + "formatlist": interpolationFuncFormatList(), + "index": interpolationFuncIndex(), + "join": interpolationFuncJoin(), + "jsonencode": interpolationFuncJSONEncode(), + "length": interpolationFuncLength(), + "list": interpolationFuncList(), + "log": interpolationFuncLog(), + "lower": interpolationFuncLower(), + "map": interpolationFuncMap(), + "max": interpolationFuncMax(), + "md5": interpolationFuncMd5(), + "merge": interpolationFuncMerge(), + "min": interpolationFuncMin(), + "pathexpand": interpolationFuncPathExpand(), + "uuid": interpolationFuncUUID(), + "replace": interpolationFuncReplace(), + "sha1": interpolationFuncSha1(), + "sha256": interpolationFuncSha256(), + "sha512": interpolationFuncSha512(), + "signum": interpolationFuncSignum(), + "slice": interpolationFuncSlice(), + "sort": interpolationFuncSort(), + "split": interpolationFuncSplit(), + "substr": interpolationFuncSubstr(), + "timestamp": interpolationFuncTimestamp(), + "title": interpolationFuncTitle(), + "trimspace": interpolationFuncTrimSpace(), + "upper": interpolationFuncUpper(), + "zipmap": interpolationFuncZipMap(), + } +} + +// interpolationFuncList creates a list from the parameters passed +// to it. +func interpolationFuncList() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{}, + ReturnType: ast.TypeList, + Variadic: true, + VariadicType: ast.TypeAny, + Callback: func(args []interface{}) (interface{}, error) { + var outputList []ast.Variable + + for i, val := range args { + switch v := val.(type) { + case string: + outputList = append(outputList, ast.Variable{Type: ast.TypeString, Value: v}) + case []ast.Variable: + outputList = append(outputList, ast.Variable{Type: ast.TypeList, Value: v}) + case map[string]ast.Variable: + outputList = append(outputList, ast.Variable{Type: ast.TypeMap, Value: v}) + default: + return nil, fmt.Errorf("unexpected type %T for argument %d in list", v, i) + } + } + + // we don't support heterogeneous types, so make sure all types match the first + if len(outputList) > 0 { + firstType := outputList[0].Type + for i, v := range outputList[1:] { + if v.Type != firstType { + return nil, fmt.Errorf("unexpected type %s for argument %d in list", v.Type, i+1) + } + } + } + + return outputList, nil + }, + } +} + +// interpolationFuncMap creates a map from the parameters passed +// to it. 
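+//
+// e.g. "${map("k1", "v1", "k2", "v2")}" produces {k1 = "v1", k2 = "v2"};
+// an odd number of arguments or a repeated key is an error.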
+func interpolationFuncMap() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{}, + ReturnType: ast.TypeMap, + Variadic: true, + VariadicType: ast.TypeAny, + Callback: func(args []interface{}) (interface{}, error) { + outputMap := make(map[string]ast.Variable) + + if len(args)%2 != 0 { + return nil, fmt.Errorf("requires an even number of arguments, got %d", len(args)) + } + + var firstType *ast.Type + for i := 0; i < len(args); i += 2 { + key, ok := args[i].(string) + if !ok { + return nil, fmt.Errorf("argument %d represents a key, so it must be a string", i+1) + } + val := args[i+1] + variable, err := hil.InterfaceToVariable(val) + if err != nil { + return nil, err + } + // Enforce map type homogeneity + if firstType == nil { + firstType = &variable.Type + } else if variable.Type != *firstType { + return nil, fmt.Errorf("all map values must have the same type, got %s then %s", firstType.Printable(), variable.Type.Printable()) + } + // Check for duplicate keys + if _, ok := outputMap[key]; ok { + return nil, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key) + } + outputMap[key] = variable + } + + return outputMap, nil + }, + } +} + +// interpolationFuncCompact strips a list of multi-variable values +// (e.g. as returned by "split") of any empty strings. +func interpolationFuncCompact() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeList}, + ReturnType: ast.TypeList, + Variadic: false, + Callback: func(args []interface{}) (interface{}, error) { + inputList := args[0].([]ast.Variable) + + var outputList []string + for _, val := range inputList { + strVal, ok := val.Value.(string) + if !ok { + return nil, fmt.Errorf( + "compact() may only be used with flat lists, this list contains elements of %s", + val.Type.Printable()) + } + if strVal == "" { + continue + } + + outputList = append(outputList, strVal) + } + return stringSliceToVariableValue(outputList), nil + }, + } +} + +// interpolationFuncCidrHost implements the "cidrhost" function that +// fills in the host part of a CIDR range address to create a single +// host address +func interpolationFuncCidrHost() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ + ast.TypeString, // starting CIDR mask + ast.TypeInt, // host number to insert + }, + ReturnType: ast.TypeString, + Variadic: false, + Callback: func(args []interface{}) (interface{}, error) { + hostNum := args[1].(int) + _, network, err := net.ParseCIDR(args[0].(string)) + if err != nil { + return nil, fmt.Errorf("invalid CIDR expression: %s", err) + } + + ip, err := cidr.Host(network, hostNum) + if err != nil { + return nil, err + } + + return ip.String(), nil + }, + } +} + +// interpolationFuncCidrNetmask implements the "cidrnetmask" function +// that returns the subnet mask in IP address notation. +func interpolationFuncCidrNetmask() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ + ast.TypeString, // CIDR mask + }, + ReturnType: ast.TypeString, + Variadic: false, + Callback: func(args []interface{}) (interface{}, error) { + _, network, err := net.ParseCIDR(args[0].(string)) + if err != nil { + return nil, fmt.Errorf("invalid CIDR expression: %s", err) + } + + return net.IP(network.Mask).String(), nil + }, + } +} + +// interpolationFuncCidrSubnet implements the "cidrsubnet" function that +// adds an additional subnet of the given length onto an existing +// IP block expressed in CIDR notation. 
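+//
+// e.g. "${cidrsubnet("10.0.0.0/16", 8, 2)}" extends the /16 prefix by 8
+// bits and selects network number 2, yielding "10.0.2.0/24".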
+func interpolationFuncCidrSubnet() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ + ast.TypeString, // starting CIDR mask + ast.TypeInt, // number of bits to extend the prefix + ast.TypeInt, // network number to append to the prefix + }, + ReturnType: ast.TypeString, + Variadic: false, + Callback: func(args []interface{}) (interface{}, error) { + extraBits := args[1].(int) + subnetNum := args[2].(int) + _, network, err := net.ParseCIDR(args[0].(string)) + if err != nil { + return nil, fmt.Errorf("invalid CIDR expression: %s", err) + } + + // For portability with 32-bit systems where the subnet number + // will be a 32-bit int, we only allow extension of 32 bits in + // one call even if we're running on a 64-bit machine. + // (Of course, this is significant only for IPv6.) + if extraBits > 32 { + return nil, fmt.Errorf("may not extend prefix by more than 32 bits") + } + + newNetwork, err := cidr.Subnet(network, extraBits, subnetNum) + if err != nil { + return nil, err + } + + return newNetwork.String(), nil + }, + } +} + +// interpolationFuncCoalesce implements the "coalesce" function that +// returns the first non null / empty string from the provided input +func interpolationFuncCoalesce() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Variadic: true, + VariadicType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + if len(args) < 2 { + return nil, fmt.Errorf("must provide at least two arguments") + } + for _, arg := range args { + argument := arg.(string) + + if argument != "" { + return argument, nil + } + } + return "", nil + }, + } +} + +// interpolationFuncCoalesceList implements the "coalescelist" function that +// returns the first non empty list from the provided input +func interpolationFuncCoalesceList() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeList}, + ReturnType: ast.TypeList, + Variadic: true, + VariadicType: ast.TypeList, + Callback: func(args []interface{}) (interface{}, error) { + if len(args) < 2 { + return nil, fmt.Errorf("must provide at least two arguments") + } + for _, arg := range args { + argument := arg.([]ast.Variable) + + if len(argument) > 0 { + return argument, nil + } + } + return make([]ast.Variable, 0), nil + }, + } +} + +// interpolationFuncConcat implements the "concat" function that concatenates +// multiple lists. +func interpolationFuncConcat() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeList}, + ReturnType: ast.TypeList, + Variadic: true, + VariadicType: ast.TypeList, + Callback: func(args []interface{}) (interface{}, error) { + var outputList []ast.Variable + + for _, arg := range args { + for _, v := range arg.([]ast.Variable) { + switch v.Type { + case ast.TypeString: + outputList = append(outputList, v) + case ast.TypeList: + outputList = append(outputList, v) + case ast.TypeMap: + outputList = append(outputList, v) + default: + return nil, fmt.Errorf("concat() does not support lists of %s", v.Type.Printable()) + } + } + } + + // we don't support heterogeneous types, so make sure all types match the first + if len(outputList) > 0 { + firstType := outputList[0].Type + for _, v := range outputList[1:] { + if v.Type != firstType { + return nil, fmt.Errorf("unexpected %s in list of %s", v.Type.Printable(), firstType.Printable()) + } + } + } + + return outputList, nil + }, + } +} + +// interpolationFuncFile implements the "file" function that allows +// loading contents from a file. 
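+//
+// e.g. "${file("~/keys/id_rsa.pub")}" expands the leading ~ via go-homedir
+// and returns the file's contents as a string (the path is illustrative).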
+func interpolationFuncFile() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			path, err := homedir.Expand(args[0].(string))
+			if err != nil {
+				return "", err
+			}
+			data, err := ioutil.ReadFile(path)
+			if err != nil {
+				return "", err
+			}
+
+			return string(data), nil
+		},
+	}
+}
+
+// interpolationFuncFormat implements the "format" function that does
+// string formatting.
+func interpolationFuncFormat() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeString},
+		Variadic:     true,
+		VariadicType: ast.TypeAny,
+		ReturnType:   ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			format := args[0].(string)
+			return fmt.Sprintf(format, args[1:]...), nil
+		},
+	}
+}
+
+// interpolationFuncMax returns the maximum of the numeric arguments
+func interpolationFuncMax() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeFloat},
+		ReturnType:   ast.TypeFloat,
+		Variadic:     true,
+		VariadicType: ast.TypeFloat,
+		Callback: func(args []interface{}) (interface{}, error) {
+			max := args[0].(float64)
+
+			for i := 1; i < len(args); i++ {
+				max = math.Max(max, args[i].(float64))
+			}
+
+			return max, nil
+		},
+	}
+}
+
+// interpolationFuncMin returns the minimum of the numeric arguments
+func interpolationFuncMin() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeFloat},
+		ReturnType:   ast.TypeFloat,
+		Variadic:     true,
+		VariadicType: ast.TypeFloat,
+		Callback: func(args []interface{}) (interface{}, error) {
+			min := args[0].(float64)
+
+			for i := 1; i < len(args); i++ {
+				min = math.Min(min, args[i].(float64))
+			}
+
+			return min, nil
+		},
+	}
+}
+
+// interpolationFuncPathExpand expands a leading `~` in the given path to
+// the current user's home directory
+func interpolationFuncPathExpand() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return homedir.Expand(args[0].(string))
+		},
+	}
+}
+
+// interpolationFuncCeil returns the least integer value greater than or
+// equal to the argument
+func interpolationFuncCeil() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeFloat},
+		ReturnType: ast.TypeInt,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return int(math.Ceil(args[0].(float64))), nil
+		},
+	}
+}
+
+// interpolationFuncLog returns the logarithm of the first argument in the
+// base given by the second argument.
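+//
+// e.g. log(16, 2) returns 4 (computed as ln(16)/ln(2)).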
+func interpolationFuncLog() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeFloat, ast.TypeFloat},
+		ReturnType: ast.TypeFloat,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return math.Log(args[0].(float64)) / math.Log(args[1].(float64)), nil
+		},
+	}
+}
+
+// interpolationFuncChomp removes trailing newlines from the given string
+func interpolationFuncChomp() ast.Function {
+	newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return newlines.ReplaceAllString(args[0].(string), ""), nil
+		},
+	}
+}
+
+// interpolationFuncFloor returns the greatest integer value less than or
+// equal to the argument
+func interpolationFuncFloor() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeFloat},
+		ReturnType: ast.TypeInt,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return int(math.Floor(args[0].(float64))), nil
+		},
+	}
+}
+
+// interpolationFuncZipMap implements the "zipmap" function that builds a
+// map from a list of keys and a matching list of values.
+func interpolationFuncZipMap() ast.Function {
+	return ast.Function{
+		ArgTypes: []ast.Type{
+			ast.TypeList, // Keys
+			ast.TypeList, // Values
+		},
+		ReturnType: ast.TypeMap,
+		Callback: func(args []interface{}) (interface{}, error) {
+			keys := args[0].([]ast.Variable)
+			values := args[1].([]ast.Variable)
+
+			if len(keys) != len(values) {
+				return nil, fmt.Errorf("count of keys (%d) does not match count of values (%d)",
+					len(keys), len(values))
+			}
+
+			for i, val := range keys {
+				if val.Type != ast.TypeString {
+					return nil, fmt.Errorf("keys must be strings. value at position %d is %s",
+						i, val.Type.Printable())
+				}
+			}
+
+			result := map[string]ast.Variable{}
+			for i := 0; i < len(keys); i++ {
+				result[keys[i].Value.(string)] = values[i]
+			}
+
+			return result, nil
+		},
+	}
+}
+
+// interpolationFuncFormatList implements the "formatlist" function that does
+// string formatting on lists.
+func interpolationFuncFormatList() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeAny},
+		Variadic:     true,
+		VariadicType: ast.TypeAny,
+		ReturnType:   ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			// Make a copy of the variadic part of args
+			// to avoid modifying the original.
+			varargs := make([]interface{}, len(args)-1)
+			copy(varargs, args[1:])
+
+			// Verify we have some arguments
+			if len(varargs) == 0 {
+				return nil, fmt.Errorf("no arguments to formatlist")
+			}
+
+			// Convert arguments that are lists into slices.
+			// Confirm along the way that all lists have the same length (n).
+			var n int
+			listSeen := false
+			for i := 1; i < len(args); i++ {
+				s, ok := args[i].([]ast.Variable)
+				if !ok {
+					continue
+				}
+
+				// Mark that we've seen at least one list
+				listSeen = true
+
+				// Convert the ast.Variable to a slice of strings
+				parts, err := listVariableValueToStringSlice(s)
+				if err != nil {
+					return nil, err
+				}
+
+				// otherwise the list is sent down to be indexed
+				varargs[i-1] = parts
+
+				// Check length
+				if n == 0 {
+					// first list we've seen
+					n = len(parts)
+					continue
+				}
+				if n != len(parts) {
+					return nil, fmt.Errorf("format: mismatched list lengths: %d != %d", n, len(parts))
+				}
+			}
+
+			// If we didn't see a list this is an error because we
+			// can't determine the return value length.
+			if !listSeen {
+				return nil, fmt.Errorf(
+					"formatlist requires at least one list argument")
+			}
+
+			// Do the formatting.
+			format := args[0].(string)
+
+			// Generate a list of formatted strings.
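+			// e.g. formatlist("%s:%s", list("a", "b"), "80") would produce
+			// ["a:80", "b:80"]; scalar arguments repeat for each element.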
+			list := make([]string, n)
+			fmtargs := make([]interface{}, len(varargs))
+			for i := 0; i < n; i++ {
+				for j, arg := range varargs {
+					switch arg := arg.(type) {
+					default:
+						fmtargs[j] = arg
+					case []string:
+						fmtargs[j] = arg[i]
+					}
+				}
+				list[i] = fmt.Sprintf(format, fmtargs...)
+			}
+			return stringSliceToVariableValue(list), nil
+		},
+	}
+}
+
+// interpolationFuncIndex implements the "index" function that allows one to
+// find the index of a specific element in a list
+func interpolationFuncIndex() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeList, ast.TypeString},
+		ReturnType: ast.TypeInt,
+		Callback: func(args []interface{}) (interface{}, error) {
+			haystack := args[0].([]ast.Variable)
+			needle := args[1].(string)
+			for index, element := range haystack {
+				if needle == element.Value {
+					return index, nil
+				}
+			}
+			return nil, fmt.Errorf("Could not find '%s' in '%s'", needle, haystack)
+		},
+	}
+}
+
+// interpolationFuncDirname implements the "dirname" function.
+func interpolationFuncDirname() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return filepath.Dir(args[0].(string)), nil
+		},
+	}
+}
+
+// interpolationFuncDistinct implements the "distinct" function that
+// removes duplicate elements from a list.
+func interpolationFuncDistinct() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeList},
+		ReturnType:   ast.TypeList,
+		Variadic:     true,
+		VariadicType: ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			var list []string
+
+			if len(args) != 1 {
+				return nil, fmt.Errorf("accepts only one argument")
+			}
+
+			if argument, ok := args[0].([]ast.Variable); ok {
+				for _, element := range argument {
+					if element.Type != ast.TypeString {
+						return nil, fmt.Errorf(
+							"only works for flat lists, this list contains elements of %s",
+							element.Type.Printable())
+					}
+					list = appendIfMissing(list, element.Value.(string))
+				}
+			}
+
+			return stringSliceToVariableValue(list), nil
+		},
+	}
+}
+
+// appendIfMissing is a helper that adds an element to a list only if it
+// does not already exist in the list.
+func appendIfMissing(slice []string, element string) []string {
+	for _, ele := range slice {
+		if ele == element {
+			return slice
+		}
+	}
+	return append(slice, element)
+}
+
+// interpolationFuncMatchKeys implements the "matchkeys" function: for two
+// lists `keys` and `values` of equal length, it returns all elements from
+// `values` where the corresponding element from `keys` is in `searchset`.
+func interpolationFuncMatchKeys() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeList, ast.TypeList, ast.TypeList},
+		ReturnType: ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			output := make([]ast.Variable, 0)
+
+			values, _ := args[0].([]ast.Variable)
+			keys, _ := args[1].([]ast.Variable)
+			searchset, _ := args[2].([]ast.Variable)
+
+			if len(keys) != len(values) {
+				return nil, fmt.Errorf("length of keys and values should be equal")
+			}
+
+			for i, key := range keys {
+				for _, search := range searchset {
+					if res, err := compareSimpleVariables(key, search); err != nil {
+						return nil, err
+					} else if res {
+						output = append(output, values[i])
+						break
+					}
+				}
+			}
+			// if searchset is empty, then output is an empty list as well.
+			// if we haven't matched any key, then output is an empty list.
+			return output, nil
+		},
+	}
+}
+
+// compare two variables of the same type, i.e. 
non complex one, such as TypeList or TypeMap +func compareSimpleVariables(a, b ast.Variable) (bool, error) { + if a.Type != b.Type { + return false, fmt.Errorf( + "won't compare items of different types %s and %s", + a.Type.Printable(), b.Type.Printable()) + } + switch a.Type { + case ast.TypeString: + return a.Value.(string) == b.Value.(string), nil + default: + return false, fmt.Errorf( + "can't compare items of type %s", + a.Type.Printable()) + } +} + +// interpolationFuncJoin implements the "join" function that allows +// multi-variable values to be joined by some character. +func interpolationFuncJoin() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + Variadic: true, + VariadicType: ast.TypeList, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + var list []string + + if len(args) < 2 { + return nil, fmt.Errorf("not enough arguments to join()") + } + + for _, arg := range args[1:] { + for _, part := range arg.([]ast.Variable) { + if part.Type != ast.TypeString { + return nil, fmt.Errorf( + "only works on flat lists, this list contains elements of %s", + part.Type.Printable()) + } + list = append(list, part.Value.(string)) + } + } + + return strings.Join(list, args[0].(string)), nil + }, + } +} + +// interpolationFuncJSONEncode implements the "jsonencode" function that encodes +// a string, list, or map as its JSON representation. For now, values in the +// list or map may only be strings. +func interpolationFuncJSONEncode() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeAny}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + var toEncode interface{} + + switch typedArg := args[0].(type) { + case string: + toEncode = typedArg + + case []ast.Variable: + // We preallocate the list here. Note that it's important that in + // the length 0 case, we have an empty list rather than nil, as + // they encode differently. + // XXX It would be nice to support arbitrarily nested data here. Is + // there an inverse of hil.InterfaceToVariable? + strings := make([]string, len(typedArg)) + + for i, v := range typedArg { + if v.Type != ast.TypeString { + return "", fmt.Errorf("list elements must be strings") + } + strings[i] = v.Value.(string) + } + toEncode = strings + + case map[string]ast.Variable: + // XXX It would be nice to support arbitrarily nested data here. Is + // there an inverse of hil.InterfaceToVariable? + stringMap := make(map[string]string) + for k, v := range typedArg { + if v.Type != ast.TypeString { + return "", fmt.Errorf("map values must be strings") + } + stringMap[k] = v.Value.(string) + } + toEncode = stringMap + + default: + return "", fmt.Errorf("unknown type for JSON encoding: %T", args[0]) + } + + jEnc, err := json.Marshal(toEncode) + if err != nil { + return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode) + } + return string(jEnc), nil + }, + } +} + +// interpolationFuncReplace implements the "replace" function that does +// string replacement. +func interpolationFuncReplace() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString, ast.TypeString, ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + search := args[1].(string) + replace := args[2].(string) + + // We search/replace using a regexp if the string is surrounded + // in forward slashes. 
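+			// e.g. replace("hello", "/l+/", "L") compiles the pattern "l+"
+			// and yields "heLo" (an illustrative example).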
+			if len(search) > 1 && search[0] == '/' && search[len(search)-1] == '/' {
+				re, err := regexp.Compile(search[1 : len(search)-1])
+				if err != nil {
+					return nil, err
+				}
+
+				return re.ReplaceAllString(s, replace), nil
+			}
+
+			return strings.Replace(s, search, replace, -1), nil
+		},
+	}
+}
+
+func interpolationFuncLength() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeAny},
+		ReturnType: ast.TypeInt,
+		Variadic:   false,
+		Callback: func(args []interface{}) (interface{}, error) {
+			subject := args[0]
+
+			switch typedSubject := subject.(type) {
+			case string:
+				return len(typedSubject), nil
+			case []ast.Variable:
+				return len(typedSubject), nil
+			case map[string]ast.Variable:
+				return len(typedSubject), nil
+			}
+
+			return 0, fmt.Errorf("arguments to length() must be a string, list, or map")
+		},
+	}
+}
+
+func interpolationFuncSignum() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeInt},
+		ReturnType: ast.TypeInt,
+		Variadic:   false,
+		Callback: func(args []interface{}) (interface{}, error) {
+			num := args[0].(int)
+			switch {
+			case num < 0:
+				return -1, nil
+			case num > 0:
+				return +1, nil
+			default:
+				return 0, nil
+			}
+		},
+	}
+}
+
+// interpolationFuncSlice returns a portion of the input list between from, inclusive and to, exclusive.
+func interpolationFuncSlice() ast.Function {
+	return ast.Function{
+		ArgTypes: []ast.Type{
+			ast.TypeList, // inputList
+			ast.TypeInt,  // from
+			ast.TypeInt,  // to
+		},
+		ReturnType: ast.TypeList,
+		Variadic:   false,
+		Callback: func(args []interface{}) (interface{}, error) {
+			inputList := args[0].([]ast.Variable)
+			from := args[1].(int)
+			to := args[2].(int)
+
+			if from < 0 {
+				return nil, fmt.Errorf("from index must be >= 0")
+			}
+			if to > len(inputList) {
+				return nil, fmt.Errorf("to index must be <= length of the input list")
+			}
+			if from > to {
+				return nil, fmt.Errorf("from index must be <= to index")
+			}
+
+			var outputList []ast.Variable
+			for i, val := range inputList {
+				if i >= from && i < to {
+					outputList = append(outputList, val)
+				}
+			}
+			return outputList, nil
+		},
+	}
+}
+
+// interpolationFuncSort sorts a list of strings lexicographically
+func interpolationFuncSort() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeList},
+		ReturnType: ast.TypeList,
+		Variadic:   false,
+		Callback: func(args []interface{}) (interface{}, error) {
+			inputList := args[0].([]ast.Variable)
+
+			// Ensure that all the list members are strings and
+			// create a string slice from them
+			members := make([]string, len(inputList))
+			for i, val := range inputList {
+				if val.Type != ast.TypeString {
+					return nil, fmt.Errorf(
+						"sort() may only be used with lists of strings - %s at index %d",
+						val.Type.String(), i)
+				}
+
+				members[i] = val.Value.(string)
+			}
+
+			sort.Strings(members)
+			return stringSliceToVariableValue(members), nil
+		},
+	}
+}
+
+// interpolationFuncSplit implements the "split" function that allows
+// strings to be split into multi-variable values
+func interpolationFuncSplit() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString, ast.TypeString},
+		ReturnType: ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			sep := args[0].(string)
+			s := args[1].(string)
+			elements := strings.Split(s, sep)
+			return stringSliceToVariableValue(elements), nil
+		},
+	}
+}
+
+// interpolationFuncLookup implements the "lookup" function that allows
+// dynamic lookups of map types within a Terraform configuration.
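+//
+// e.g. "${lookup(var.amis, "us-east-1", "ami-fallback")}" returns the map
+// value for the key, or the optional third argument when the key is absent
+// (names are illustrative).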
+func interpolationFuncLookup(vs map[string]ast.Variable) ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeMap, ast.TypeString},
+		ReturnType:   ast.TypeString,
+		Variadic:     true,
+		VariadicType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			defaultValue := ""
+			defaultValueSet := false
+			if len(args) > 2 {
+				defaultValue = args[2].(string)
+				defaultValueSet = true
+			}
+			if len(args) > 3 {
+				return "", fmt.Errorf("lookup() takes no more than three arguments")
+			}
+			index := args[1].(string)
+			mapVar := args[0].(map[string]ast.Variable)
+
+			v, ok := mapVar[index]
+			if !ok {
+				if defaultValueSet {
+					return defaultValue, nil
+				} else {
+					return "", fmt.Errorf(
+						"lookup failed to find '%s'",
+						args[1].(string))
+				}
+			}
+			if v.Type != ast.TypeString {
+				return nil, fmt.Errorf(
+					"lookup() may only be used with flat maps, this map contains elements of %s",
+					v.Type.Printable())
+			}
+
+			return v.Value.(string), nil
+		},
+	}
+}
+
+// interpolationFuncElement implements the "element" function that allows
+// a specific index to be looked up in a multi-variable value. Note that this will
+// wrap if the index is larger than the number of elements in the multi-variable value.
+func interpolationFuncElement() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeList, ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			list := args[0].([]ast.Variable)
+			if len(list) == 0 {
+				return nil, fmt.Errorf("element() may not be used with an empty list")
+			}
+
+			index, err := strconv.Atoi(args[1].(string))
+			if err != nil || index < 0 {
+				return "", fmt.Errorf(
+					"invalid number for index, got %s", args[1])
+			}
+
+			resolvedIndex := index % len(list)
+
+			v := list[resolvedIndex]
+			if v.Type != ast.TypeString {
+				return nil, fmt.Errorf(
+					"element() may only be used with flat lists, this list contains elements of %s",
+					v.Type.Printable())
+			}
+			return v.Value, nil
+		},
+	}
+}
+
+// interpolationFuncKeys implements the "keys" function that yields a list of
+// keys of map types within a Terraform configuration.
+func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeMap},
+		ReturnType: ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			mapVar := args[0].(map[string]ast.Variable)
+			keys := make([]string, 0)
+
+			for k := range mapVar {
+				keys = append(keys, k)
+			}
+
+			sort.Strings(keys)
+
+			// Keys are guaranteed to be strings
+			return stringSliceToVariableValue(keys), nil
+		},
+	}
+}
+
+// interpolationFuncValues implements the "values" function that yields a list of
+// values of map types within a Terraform configuration.
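+//
+// e.g. values() on {a = "1", b = "2"} returns ["1", "2"], ordered by the
+// sorted keys.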
+func interpolationFuncValues(vs map[string]ast.Variable) ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeMap}, + ReturnType: ast.TypeList, + Callback: func(args []interface{}) (interface{}, error) { + mapVar := args[0].(map[string]ast.Variable) + keys := make([]string, 0) + + for k, _ := range mapVar { + keys = append(keys, k) + } + + sort.Strings(keys) + + values := make([]string, len(keys)) + for index, key := range keys { + if value, ok := mapVar[key].Value.(string); ok { + values[index] = value + } else { + return "", fmt.Errorf("values(): %q has element with bad type %s", + key, mapVar[key].Type) + } + } + + variable, err := hil.InterfaceToVariable(values) + if err != nil { + return nil, err + } + + return variable.Value, nil + }, + } +} + +// interpolationFuncBasename implements the "basename" function. +func interpolationFuncBasename() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + return filepath.Base(args[0].(string)), nil + }, + } +} + +// interpolationFuncBase64Encode implements the "base64encode" function that +// allows Base64 encoding. +func interpolationFuncBase64Encode() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + return base64.StdEncoding.EncodeToString([]byte(s)), nil + }, + } +} + +// interpolationFuncBase64Decode implements the "base64decode" function that +// allows Base64 decoding. +func interpolationFuncBase64Decode() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + sDec, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return "", fmt.Errorf("failed to decode base64 data '%s'", s) + } + return string(sDec), nil + }, + } +} + +// interpolationFuncLower implements the "lower" function that does +// string lower casing. +func interpolationFuncLower() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + toLower := args[0].(string) + return strings.ToLower(toLower), nil + }, + } +} + +func interpolationFuncMd5() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + h := md5.New() + h.Write([]byte(s)) + hash := hex.EncodeToString(h.Sum(nil)) + return hash, nil + }, + } +} + +func interpolationFuncMerge() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeMap}, + ReturnType: ast.TypeMap, + Variadic: true, + VariadicType: ast.TypeMap, + Callback: func(args []interface{}) (interface{}, error) { + outputMap := make(map[string]ast.Variable) + + for _, arg := range args { + for k, v := range arg.(map[string]ast.Variable) { + outputMap[k] = v + } + } + + return outputMap, nil + }, + } +} + +// interpolationFuncUpper implements the "upper" function that does +// string upper casing. 
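+//
+// e.g. "${upper("terraform")}" yields "TERRAFORM".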
+func interpolationFuncUpper() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + toUpper := args[0].(string) + return strings.ToUpper(toUpper), nil + }, + } +} + +func interpolationFuncSha1() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + h := sha1.New() + h.Write([]byte(s)) + hash := hex.EncodeToString(h.Sum(nil)) + return hash, nil + }, + } +} + +// hexadecimal representation of sha256 sum +func interpolationFuncSha256() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + h := sha256.New() + h.Write([]byte(s)) + hash := hex.EncodeToString(h.Sum(nil)) + return hash, nil + }, + } +} + +func interpolationFuncSha512() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + h := sha512.New() + h.Write([]byte(s)) + hash := hex.EncodeToString(h.Sum(nil)) + return hash, nil + }, + } +} + +func interpolationFuncTrimSpace() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + trimSpace := args[0].(string) + return strings.TrimSpace(trimSpace), nil + }, + } +} + +func interpolationFuncBase64Sha256() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + h := sha256.New() + h.Write([]byte(s)) + shaSum := h.Sum(nil) + encoded := base64.StdEncoding.EncodeToString(shaSum[:]) + return encoded, nil + }, + } +} + +func interpolationFuncBase64Sha512() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + h := sha512.New() + h.Write([]byte(s)) + shaSum := h.Sum(nil) + encoded := base64.StdEncoding.EncodeToString(shaSum[:]) + return encoded, nil + }, + } +} + +func interpolationFuncUUID() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + return uuid.GenerateUUID() + }, + } +} + +// interpolationFuncTimestamp +func interpolationFuncTimestamp() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + return time.Now().UTC().Format(time.RFC3339), nil + }, + } +} + +// interpolationFuncTitle implements the "title" function that returns a copy of the +// string in which first characters of all the words are capitalized. +func interpolationFuncTitle() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + toTitle := args[0].(string) + return strings.Title(toTitle), nil + }, + } +} + +// interpolationFuncSubstr implements the "substr" function that allows strings +// to be truncated. 
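+//
+// e.g. substr("hello world", 0, 5) yields "hello"; substr("hello", -3, -1)
+// yields "llo" (a negative offset counts from the end of the string, and a
+// length of -1 means "to the end of the string").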
+func interpolationFuncSubstr() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ + ast.TypeString, // input string + ast.TypeInt, // offset + ast.TypeInt, // length + }, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + str := args[0].(string) + offset := args[1].(int) + length := args[2].(int) + + // Interpret a negative offset as being equivalent to a positive + // offset taken from the end of the string. + if offset < 0 { + offset += len(str) + } + + // Interpret a length of `-1` as indicating that the substring + // should start at `offset` and continue until the end of the + // string. Any other negative length (other than `-1`) is invalid. + if length == -1 { + length = len(str) + } else if length >= 0 { + length += offset + } else { + return nil, fmt.Errorf("length should be a non-negative integer") + } + + if offset > len(str) { + return nil, fmt.Errorf("offset cannot be larger than the length of the string") + } + + if length > len(str) { + return nil, fmt.Errorf("'offset + length' cannot be larger than the length of the string") + } + + return str[offset:length], nil + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go new file mode 100644 index 00000000..ead3d102 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go @@ -0,0 +1,283 @@ +package config + +import ( + "fmt" + "reflect" + "strings" + + "github.com/hashicorp/hil" + "github.com/hashicorp/hil/ast" + "github.com/mitchellh/reflectwalk" +) + +// interpolationWalker implements interfaces for the reflectwalk package +// (github.com/mitchellh/reflectwalk) that can be used to automatically +// execute a callback for an interpolation. +type interpolationWalker struct { + // F is the function to call for every interpolation. It can be nil. + // + // If Replace is true, then the return value of F will be used to + // replace the interpolation. + F interpolationWalkerFunc + Replace bool + + // ContextF is an advanced version of F that also receives the + // location of where it is in the structure. This lets you do + // context-aware validation. + ContextF interpolationWalkerContextFunc + + key []string + lastValue reflect.Value + loc reflectwalk.Location + cs []reflect.Value + csKey []reflect.Value + csData interface{} + sliceIndex []int + unknownKeys []string +} + +// interpolationWalkerFunc is the callback called by interpolationWalk. +// It is called with any interpolation found. It should return a value +// to replace the interpolation with, along with any errors. +// +// If Replace is set to false in interpolationWalker, then the replace +// value can be anything as it will have no effect. +type interpolationWalkerFunc func(ast.Node) (interface{}, error) + +// interpolationWalkerContextFunc is called by interpolationWalk if +// ContextF is set. This receives both the interpolation and the location +// where the interpolation is. +// +// This callback can be used to validate the location of the interpolation +// within the configuration. 
+type interpolationWalkerContextFunc func(reflectwalk.Location, ast.Node) + +func (w *interpolationWalker) Enter(loc reflectwalk.Location) error { + w.loc = loc + return nil +} + +func (w *interpolationWalker) Exit(loc reflectwalk.Location) error { + w.loc = reflectwalk.None + + switch loc { + case reflectwalk.Map: + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.MapValue: + w.key = w.key[:len(w.key)-1] + w.csKey = w.csKey[:len(w.csKey)-1] + case reflectwalk.Slice: + // Split any values that need to be split + w.splitSlice() + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.SliceElem: + w.csKey = w.csKey[:len(w.csKey)-1] + w.sliceIndex = w.sliceIndex[:len(w.sliceIndex)-1] + } + + return nil +} + +func (w *interpolationWalker) Map(m reflect.Value) error { + w.cs = append(w.cs, m) + return nil +} + +func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error { + w.csData = k + w.csKey = append(w.csKey, k) + + if l := len(w.sliceIndex); l > 0 { + w.key = append(w.key, fmt.Sprintf("%d.%s", w.sliceIndex[l-1], k.String())) + } else { + w.key = append(w.key, k.String()) + } + + w.lastValue = v + return nil +} + +func (w *interpolationWalker) Slice(s reflect.Value) error { + w.cs = append(w.cs, s) + return nil +} + +func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error { + w.csKey = append(w.csKey, reflect.ValueOf(i)) + w.sliceIndex = append(w.sliceIndex, i) + return nil +} + +func (w *interpolationWalker) Primitive(v reflect.Value) error { + setV := v + + // We only care about strings + if v.Kind() == reflect.Interface { + setV = v + v = v.Elem() + } + if v.Kind() != reflect.String { + return nil + } + + astRoot, err := hil.Parse(v.String()) + if err != nil { + return err + } + + // If the AST we got is just a literal string value with the same + // value then we ignore it. We have to check if its the same value + // because it is possible to input a string, get out a string, and + // have it be different. For example: "foo-$${bar}" turns into + // "foo-${bar}" + if n, ok := astRoot.(*ast.LiteralNode); ok { + if s, ok := n.Value.(string); ok && s == v.String() { + return nil + } + } + + if w.ContextF != nil { + w.ContextF(w.loc, astRoot) + } + + if w.F == nil { + return nil + } + + replaceVal, err := w.F(astRoot) + if err != nil { + return fmt.Errorf( + "%s in:\n\n%s", + err, v.String()) + } + + if w.Replace { + // We need to determine if we need to remove this element + // if the result contains any "UnknownVariableValue" which is + // set if it is computed. This behavior is different if we're + // splitting (in a SliceElem) or not. + remove := false + if w.loc == reflectwalk.SliceElem { + switch typedReplaceVal := replaceVal.(type) { + case string: + if typedReplaceVal == UnknownVariableValue { + remove = true + } + case []interface{}: + if hasUnknownValue(typedReplaceVal) { + remove = true + } + } + } else if replaceVal == UnknownVariableValue { + remove = true + } + + if remove { + w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) + } + + resultVal := reflect.ValueOf(replaceVal) + switch w.loc { + case reflectwalk.MapKey: + m := w.cs[len(w.cs)-1] + + // Delete the old value + var zero reflect.Value + m.SetMapIndex(w.csData.(reflect.Value), zero) + + // Set the new key with the existing value + m.SetMapIndex(resultVal, w.lastValue) + + // Set the key to be the new key + w.csData = resultVal + case reflectwalk.MapValue: + // If we're in a map, then the only way to set a map value is + // to set it directly. 
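+			// (reflect's SetMapIndex is the only way to write a map
+			// element; map values are not addressable.)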
+ m := w.cs[len(w.cs)-1] + mk := w.csData.(reflect.Value) + m.SetMapIndex(mk, resultVal) + default: + // Otherwise, we should be addressable + setV.Set(resultVal) + } + } + + return nil +} + +func (w *interpolationWalker) replaceCurrent(v reflect.Value) { + // if we don't have at least 2 values, we're not going to find a map, but + // we could panic. + if len(w.cs) < 2 { + return + } + + c := w.cs[len(w.cs)-2] + switch c.Kind() { + case reflect.Map: + // Get the key and delete it + k := w.csKey[len(w.csKey)-1] + c.SetMapIndex(k, v) + } +} + +func hasUnknownValue(variable []interface{}) bool { + for _, value := range variable { + if strVal, ok := value.(string); ok { + if strVal == UnknownVariableValue { + return true + } + } + } + return false +} + +func (w *interpolationWalker) splitSlice() { + raw := w.cs[len(w.cs)-1] + + var s []interface{} + switch v := raw.Interface().(type) { + case []interface{}: + s = v + case []map[string]interface{}: + return + } + + split := false + for _, val := range s { + if varVal, ok := val.(ast.Variable); ok && varVal.Type == ast.TypeList { + split = true + } + if _, ok := val.([]interface{}); ok { + split = true + } + } + + if !split { + return + } + + result := make([]interface{}, 0) + for _, v := range s { + switch val := v.(type) { + case ast.Variable: + switch val.Type { + case ast.TypeList: + elements := val.Value.([]ast.Variable) + for _, element := range elements { + result = append(result, element.Value) + } + default: + result = append(result, val.Value) + } + case []interface{}: + for _, element := range val { + result = append(result, element) + } + default: + result = append(result, v) + } + } + + w.replaceCurrent(reflect.ValueOf(result)) +} diff --git a/vendor/github.com/hashicorp/terraform/config/lang.go b/vendor/github.com/hashicorp/terraform/config/lang.go new file mode 100644 index 00000000..890d30be --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/lang.go @@ -0,0 +1,11 @@ +package config + +import ( + "github.com/hashicorp/hil/ast" +) + +type noopNode struct{} + +func (n *noopNode) Accept(ast.Visitor) ast.Node { return n } +func (n *noopNode) Pos() ast.Pos { return ast.Pos{} } +func (n *noopNode) Type(ast.Scope) (ast.Type, error) { return ast.TypeString, nil } diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go new file mode 100644 index 00000000..0bfa89c2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/loader.go @@ -0,0 +1,224 @@ +package config + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/hashicorp/hcl" +) + +// ErrNoConfigsFound is the error returned by LoadDir if no +// Terraform configuration files were found in the given directory. +type ErrNoConfigsFound struct { + Dir string +} + +func (e ErrNoConfigsFound) Error() string { + return fmt.Sprintf( + "No Terraform configuration files found in directory: %s", + e.Dir) +} + +// LoadJSON loads a single Terraform configuration from a given JSON document. +// +// The document must be a complete Terraform configuration. This function will +// NOT try to load any additional modules so only the given document is loaded. 
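+//
+// A minimal sketch with a hypothetical document:
+//
+//	cfg, err := LoadJSON(json.RawMessage(
+//		`{"variable": {"region": {"default": "us-east-1"}}}`))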
+func LoadJSON(raw json.RawMessage) (*Config, error) {
+	obj, err := hcl.Parse(string(raw))
+	if err != nil {
+		return nil, fmt.Errorf(
+			"Error parsing JSON document as HCL: %s", err)
+	}
+
+	// Start building the result
+	hclConfig := &hclConfigurable{
+		Root: obj,
+	}
+
+	return hclConfig.Config()
+}
+
+// LoadFile loads the Terraform configuration from a given file.
+//
+// This file can be in any format that Terraform recognizes, and can import
+// any other format that Terraform recognizes.
+func LoadFile(path string) (*Config, error) {
+	importTree, err := loadTree(path)
+	if err != nil {
+		return nil, err
+	}
+
+	configTree, err := importTree.ConfigTree()
+
+	// Close the importTree now so that we can clear resources as quickly
+	// as possible.
+	importTree.Close()
+
+	if err != nil {
+		return nil, err
+	}
+
+	return configTree.Flatten()
+}
+
+// LoadDir loads all the Terraform configuration files in a single
+// directory and appends them together.
+//
+// Special files known as "override files" can also be present, which
+// are merged into the loaded configuration. That is, the non-override
+// files are loaded first to create the configuration. Then, the overrides
+// are merged into the configuration to create the final configuration.
+//
+// Files are loaded in lexical order.
+func LoadDir(root string) (*Config, error) {
+	files, overrides, err := dirFiles(root)
+	if err != nil {
+		return nil, err
+	}
+	if len(files) == 0 {
+		return nil, &ErrNoConfigsFound{Dir: root}
+	}
+
+	// Determine the absolute path to the directory.
+	rootAbs, err := filepath.Abs(root)
+	if err != nil {
+		return nil, err
+	}
+
+	var result *Config
+
+	// Sort the files and overrides so we have a deterministic order
+	sort.Strings(files)
+	sort.Strings(overrides)
+
+	// Load all the regular files, append them to each other.
+	for _, f := range files {
+		c, err := LoadFile(f)
+		if err != nil {
+			return nil, err
+		}
+
+		if result != nil {
+			result, err = Append(result, c)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			result = c
+		}
+	}
+
+	// Load all the overrides, and merge them into the config
+	for _, f := range overrides {
+		c, err := LoadFile(f)
+		if err != nil {
+			return nil, err
+		}
+
+		result, err = Merge(result, c)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Mark the directory
+	result.Dir = rootAbs
+
+	return result, nil
+}
+
+// IsEmptyDir returns true if the directory given has no Terraform
+// configuration files.
+func IsEmptyDir(root string) (bool, error) {
+	if _, err := os.Stat(root); err != nil && os.IsNotExist(err) {
+		return true, nil
+	}
+
+	fs, os, err := dirFiles(root)
+	if err != nil {
+		return false, err
+	}
+
+	return len(fs) == 0 && len(os) == 0, nil
+}
+
+// ext returns the Terraform configuration extension of the given
+// path, or a blank string if the path is not a recognized
+// configuration file.
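+//
+// For example: ext("main.tf") returns ".tf", ext("main.tf.json") returns
+// ".tf.json", and ext("README.md") returns "".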
+func ext(path string) string { + if strings.HasSuffix(path, ".tf") { + return ".tf" + } else if strings.HasSuffix(path, ".tf.json") { + return ".tf.json" + } else { + return "" + } +} + +func dirFiles(dir string) ([]string, []string, error) { + f, err := os.Open(dir) + if err != nil { + return nil, nil, err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return nil, nil, err + } + if !fi.IsDir() { + return nil, nil, fmt.Errorf( + "configuration path must be a directory: %s", + dir) + } + + var files, overrides []string + err = nil + for err != io.EOF { + var fis []os.FileInfo + fis, err = f.Readdir(128) + if err != nil && err != io.EOF { + return nil, nil, err + } + + for _, fi := range fis { + // Ignore directories + if fi.IsDir() { + continue + } + + // Only care about files that are valid to load + name := fi.Name() + extValue := ext(name) + if extValue == "" || isIgnoredFile(name) { + continue + } + + // Determine if we're dealing with an override + nameNoExt := name[:len(name)-len(extValue)] + override := nameNoExt == "override" || + strings.HasSuffix(nameNoExt, "_override") + + path := filepath.Join(dir, name) + if override { + overrides = append(overrides, path) + } else { + files = append(files, path) + } + } + } + + return files, overrides, nil +} + +// isIgnoredFile returns true or false depending on whether the +// provided file name is a file that should be ignored. +func isIgnoredFile(name string) bool { + return strings.HasPrefix(name, ".") || // Unix-like hidden files + strings.HasSuffix(name, "~") || // vim + strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs +} diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go new file mode 100644 index 00000000..9abb1960 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go @@ -0,0 +1,1130 @@ +package config + +import ( + "fmt" + "io/ioutil" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/mitchellh/mapstructure" +) + +// hclConfigurable is an implementation of configurable that knows +// how to turn HCL configuration into a *Config object. +type hclConfigurable struct { + File string + Root *ast.File +} + +func (t *hclConfigurable) Config() (*Config, error) { + validKeys := map[string]struct{}{ + "atlas": struct{}{}, + "data": struct{}{}, + "module": struct{}{}, + "output": struct{}{}, + "provider": struct{}{}, + "resource": struct{}{}, + "terraform": struct{}{}, + "variable": struct{}{}, + } + + // Top-level item should be the object list + list, ok := t.Root.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing: file doesn't contain a root object") + } + + // Start building up the actual configuration. 
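+	// (For reference, a single document may mix any of the valid block
+	// types above, e.g. `variable "x" {}`, `provider "aws" {}` and
+	// `resource "aws_instance" "web" {}` side by side; each Filter
+	// call below collects one block type. Illustrative example.)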
+ config := new(Config) + + // Terraform config + if o := list.Filter("terraform"); len(o.Items) > 0 { + var err error + config.Terraform, err = loadTerraformHcl(o) + if err != nil { + return nil, err + } + } + + // Build the variables + if vars := list.Filter("variable"); len(vars.Items) > 0 { + var err error + config.Variables, err = loadVariablesHcl(vars) + if err != nil { + return nil, err + } + } + + // Get Atlas configuration + if atlas := list.Filter("atlas"); len(atlas.Items) > 0 { + var err error + config.Atlas, err = loadAtlasHcl(atlas) + if err != nil { + return nil, err + } + } + + // Build the modules + if modules := list.Filter("module"); len(modules.Items) > 0 { + var err error + config.Modules, err = loadModulesHcl(modules) + if err != nil { + return nil, err + } + } + + // Build the provider configs + if providers := list.Filter("provider"); len(providers.Items) > 0 { + var err error + config.ProviderConfigs, err = loadProvidersHcl(providers) + if err != nil { + return nil, err + } + } + + // Build the resources + { + var err error + managedResourceConfigs := list.Filter("resource") + dataResourceConfigs := list.Filter("data") + + config.Resources = make( + []*Resource, 0, + len(managedResourceConfigs.Items)+len(dataResourceConfigs.Items), + ) + + managedResources, err := loadManagedResourcesHcl(managedResourceConfigs) + if err != nil { + return nil, err + } + dataResources, err := loadDataResourcesHcl(dataResourceConfigs) + if err != nil { + return nil, err + } + + config.Resources = append(config.Resources, dataResources...) + config.Resources = append(config.Resources, managedResources...) + } + + // Build the outputs + if outputs := list.Filter("output"); len(outputs.Items) > 0 { + var err error + config.Outputs, err = loadOutputsHcl(outputs) + if err != nil { + return nil, err + } + } + + // Check for invalid keys + for _, item := range list.Items { + if len(item.Keys) == 0 { + // Not sure how this would happen, but let's avoid a panic + continue + } + + k := item.Keys[0].Token.Value().(string) + if _, ok := validKeys[k]; ok { + continue + } + + config.unknownKeys = append(config.unknownKeys, k) + } + + return config, nil +} + +// loadFileHcl is a fileLoaderFunc that knows how to read HCL +// files and turn them into hclConfigurables. +func loadFileHcl(root string) (configurable, []string, error) { + // Read the HCL file and prepare for parsing + d, err := ioutil.ReadFile(root) + if err != nil { + return nil, nil, fmt.Errorf( + "Error reading %s: %s", root, err) + } + + // Parse it + hclRoot, err := hcl.Parse(string(d)) + if err != nil { + return nil, nil, fmt.Errorf( + "Error parsing %s: %s", root, err) + } + + // Start building the result + result := &hclConfigurable{ + File: root, + Root: hclRoot, + } + + // Dive in, find the imports. This is disabled for now since + // imports were removed prior to Terraform 0.1. The code is + // remaining here commented for historical purposes. 
+ /* + imports := obj.Get("import") + if imports == nil { + result.Object.Ref() + return result, nil, nil + } + + if imports.Type() != libucl.ObjectTypeString { + imports.Close() + + return nil, nil, fmt.Errorf( + "Error in %s: all 'import' declarations should be in the format\n"+ + "`import \"foo\"` (Got type %s)", + root, + imports.Type()) + } + + // Gather all the import paths + importPaths := make([]string, 0, imports.Len()) + iter := imports.Iterate(false) + for imp := iter.Next(); imp != nil; imp = iter.Next() { + path := imp.ToString() + if !filepath.IsAbs(path) { + // Relative paths are relative to the Terraform file itself + dir := filepath.Dir(root) + path = filepath.Join(dir, path) + } + + importPaths = append(importPaths, path) + imp.Close() + } + iter.Close() + imports.Close() + + result.Object.Ref() + */ + + return result, nil, nil +} + +// Given a handle to a HCL object, this transforms it into the Terraform config +func loadTerraformHcl(list *ast.ObjectList) (*Terraform, error) { + if len(list.Items) > 1 { + return nil, fmt.Errorf("only one 'terraform' block allowed per module") + } + + // Get our one item + item := list.Items[0] + + // This block should have an empty top level ObjectItem. If there are keys + // here, it's likely because we have a flattened JSON object, and we can + // lift this into a nested ObjectList to decode properly. + if len(item.Keys) > 0 { + item = &ast.ObjectItem{ + Val: &ast.ObjectType{ + List: &ast.ObjectList{ + Items: []*ast.ObjectItem{item}, + }, + }, + } + } + + // We need the item value as an ObjectList + var listVal *ast.ObjectList + if ot, ok := item.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("terraform block: should be an object") + } + + // NOTE: We purposely don't validate unknown HCL keys here so that + // we can potentially read _future_ Terraform version config (to + // still be able to validate the required version). + // + // We should still keep track of unknown keys to validate later, but + // HCL doesn't currently support that. + + var config Terraform + if err := hcl.DecodeObject(&config, item.Val); err != nil { + return nil, fmt.Errorf( + "Error reading terraform config: %s", + err) + } + + // If we have provisioners, then parse those out + if os := listVal.Filter("backend"); len(os.Items) > 0 { + var err error + config.Backend, err = loadTerraformBackendHcl(os) + if err != nil { + return nil, fmt.Errorf( + "Error reading backend config for terraform block: %s", + err) + } + } + + return &config, nil +} + +// Loads the Backend configuration from an object list. 
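+//
+// In HCL source this corresponds to a nested block such as (illustrative):
+//
+//	terraform {
+//	  backend "s3" {
+//	    bucket = "mybucket"
+//	    key    = "path/to/terraform.tfstate"
+//	  }
+//	}
+//
+// where "s3" becomes Backend.Type and the block body becomes
+// Backend.RawConfig.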
+func loadTerraformBackendHcl(list *ast.ObjectList) (*Backend, error) { + if len(list.Items) > 1 { + return nil, fmt.Errorf("only one 'backend' block allowed") + } + + // Get our one item + item := list.Items[0] + + // Verify the keys + if len(item.Keys) != 1 { + return nil, fmt.Errorf( + "position %s: 'backend' must be followed by exactly one string: a type", + item.Pos()) + } + + typ := item.Keys[0].Token.Value().(string) + + // Decode the raw config + var config map[string]interface{} + if err := hcl.DecodeObject(&config, item.Val); err != nil { + return nil, fmt.Errorf( + "Error reading backend config: %s", + err) + } + + rawConfig, err := NewRawConfig(config) + if err != nil { + return nil, fmt.Errorf( + "Error reading backend config: %s", + err) + } + + b := &Backend{ + Type: typ, + RawConfig: rawConfig, + } + b.Hash = b.Rehash() + + return b, nil +} + +// Given a handle to a HCL object, this transforms it into the Atlas +// configuration. +func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) { + if len(list.Items) > 1 { + return nil, fmt.Errorf("only one 'atlas' block allowed") + } + + // Get our one item + item := list.Items[0] + + var config AtlasConfig + if err := hcl.DecodeObject(&config, item.Val); err != nil { + return nil, fmt.Errorf( + "Error reading atlas config: %s", + err) + } + + return &config, nil +} + +// Given a handle to a HCL object, this recurses into the structure +// and pulls out a list of modules. +// +// The resulting modules may not be unique, but each module +// represents exactly one module definition in the HCL configuration. +// We leave it up to another pass to merge them together. +func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) { + if err := assertAllBlocksHaveNames("module", list); err != nil { + return nil, err + } + + list = list.Children() + if len(list.Items) == 0 { + return nil, nil + } + + // Where all the results will go + var result []*Module + + // Now go over all the types and their children in order to get + // all of the actual resources. + for _, item := range list.Items { + k := item.Keys[0].Token.Value().(string) + + var listVal *ast.ObjectList + if ot, ok := item.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("module '%s': should be an object", k) + } + + var config map[string]interface{} + if err := hcl.DecodeObject(&config, item.Val); err != nil { + return nil, fmt.Errorf( + "Error reading config for %s: %s", + k, + err) + } + + // Remove the fields we handle specially + delete(config, "source") + + rawConfig, err := NewRawConfig(config) + if err != nil { + return nil, fmt.Errorf( + "Error reading config for %s: %s", + k, + err) + } + + // If we have a count, then figure it out + var source string + if o := listVal.Filter("source"); len(o.Items) > 0 { + err = hcl.DecodeObject(&source, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error parsing source for %s: %s", + k, + err) + } + } + + result = append(result, &Module{ + Name: k, + Source: source, + RawConfig: rawConfig, + }) + } + + return result, nil +} + +// LoadOutputsHcl recurses into the given HCL object and turns +// it into a mapping of outputs. +func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { + if err := assertAllBlocksHaveNames("output", list); err != nil { + return nil, err + } + + list = list.Children() + + // Go through each object and turn it into an actual result. 
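+	// (e.g. a block like `output "ip" { value = "${aws_instance.web.public_ip}" }`
+	// yields one *Output named "ip"; illustrative example.)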
+	result := make([]*Output, 0, len(list.Items))
+	for _, item := range list.Items {
+		n := item.Keys[0].Token.Value().(string)
+
+		var listVal *ast.ObjectList
+		if ot, ok := item.Val.(*ast.ObjectType); ok {
+			listVal = ot.List
+		} else {
+			return nil, fmt.Errorf("output '%s': should be an object", n)
+		}
+
+		var config map[string]interface{}
+		if err := hcl.DecodeObject(&config, item.Val); err != nil {
+			return nil, err
+		}
+
+		// Delete special keys
+		delete(config, "depends_on")
+
+		rawConfig, err := NewRawConfig(config)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Error reading config for output %s: %s",
+				n,
+				err)
+		}
+
+		// If we have depends fields, then add those in
+		var dependsOn []string
+		if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
+			err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
+			if err != nil {
+				return nil, fmt.Errorf(
+					"Error reading depends_on for output %q: %s",
+					n,
+					err)
+			}
+		}
+
+		result = append(result, &Output{
+			Name:      n,
+			RawConfig: rawConfig,
+			DependsOn: dependsOn,
+		})
+	}
+
+	return result, nil
+}
+
+// loadVariablesHcl recurses into the given HCL object and turns
+// it into a list of variables.
+func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) {
+	if err := assertAllBlocksHaveNames("variable", list); err != nil {
+		return nil, err
+	}
+
+	list = list.Children()
+
+	// hclVariable is the structure each variable is decoded into
+	type hclVariable struct {
+		DeclaredType string `hcl:"type"`
+		Default      interface{}
+		Description  string
+		Fields       []string `hcl:",decodedFields"`
+	}
+
+	// Go through each object and turn it into an actual result.
+	result := make([]*Variable, 0, len(list.Items))
+	for _, item := range list.Items {
+		// Clean up items from JSON
+		unwrapHCLObjectKeysFromJSON(item, 1)
+
+		// Verify the keys
+		if len(item.Keys) != 1 {
+			return nil, fmt.Errorf(
+				"position %s: 'variable' must be followed by exactly one string: a name",
+				item.Pos())
+		}
+
+		n := item.Keys[0].Token.Value().(string)
+		if !NameRegexp.MatchString(n) {
+			return nil, fmt.Errorf(
+				"position %s: 'variable' name must match regular expression: %s",
+				item.Pos(), NameRegexp)
+		}
+
+		// Check for invalid keys
+		valid := []string{"type", "default", "description"}
+		if err := checkHCLKeys(item.Val, valid); err != nil {
+			return nil, multierror.Prefix(err, fmt.Sprintf(
+				"variable[%s]:", n))
+		}
+
+		// Decode into hclVariable to get typed values
+		var hclVar hclVariable
+		if err := hcl.DecodeObject(&hclVar, item.Val); err != nil {
+			return nil, err
+		}
+
+		// Defaults turn into a slice of map[string]interface{} and
+		// we need to make sure to convert that down into the
+		// proper type for Config.
+		if ms, ok := hclVar.Default.([]map[string]interface{}); ok {
+			def := make(map[string]interface{})
+			for _, m := range ms {
+				for k, v := range m {
+					def[k] = v
+				}
+			}
+
+			hclVar.Default = def
+		}
+
+		// Build the new variable and do some basic validation
+		newVar := &Variable{
+			Name:         n,
+			DeclaredType: hclVar.DeclaredType,
+			Default:      hclVar.Default,
+			Description:  hclVar.Description,
+		}
+		if err := newVar.ValidateTypeAndDefault(); err != nil {
+			return nil, err
+		}
+
+		result = append(result, newVar)
+	}
+
+	return result, nil
+}
+
+// loadProvidersHcl recurses into the given HCL object and turns
+// it into a list of provider configs.
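+//
+// For example (illustrative), the block
+//
+//	provider "aws" {
+//	  alias  = "west"
+//	  region = "us-west-2"
+//	}
+//
+// becomes a ProviderConfig with Name "aws", Alias "west", and the
+// remaining attributes in RawConfig.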
+func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
+	if err := assertAllBlocksHaveNames("provider", list); err != nil {
+		return nil, err
+	}
+
+	list = list.Children()
+	if len(list.Items) == 0 {
+		return nil, nil
+	}
+
+	// Go through each object and turn it into an actual result.
+	result := make([]*ProviderConfig, 0, len(list.Items))
+	for _, item := range list.Items {
+		n := item.Keys[0].Token.Value().(string)
+
+		var listVal *ast.ObjectList
+		if ot, ok := item.Val.(*ast.ObjectType); ok {
+			listVal = ot.List
+		} else {
+			return nil, fmt.Errorf("provider '%s': should be an object", n)
+		}
+
+		var config map[string]interface{}
+		if err := hcl.DecodeObject(&config, item.Val); err != nil {
+			return nil, err
+		}
+
+		delete(config, "alias")
+
+		rawConfig, err := NewRawConfig(config)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Error reading config for provider config %s: %s",
+				n,
+				err)
+		}
+
+		// If we have an alias field, then add it in
+		var alias string
+		if a := listVal.Filter("alias"); len(a.Items) > 0 {
+			err := hcl.DecodeObject(&alias, a.Items[0].Val)
+			if err != nil {
+				return nil, fmt.Errorf(
+					"Error reading alias for provider[%s]: %s",
+					n,
+					err)
+			}
+		}
+
+		result = append(result, &ProviderConfig{
+			Name:      n,
+			Alias:     alias,
+			RawConfig: rawConfig,
+		})
+	}
+
+	return result, nil
+}
+
+// Given a handle to a HCL object, this recurses into the structure
+// and pulls out a list of data sources.
+//
+// The resulting data sources may not be unique, but each one
+// represents exactly one data definition in the HCL configuration.
+// We leave it up to another pass to merge them together.
+func loadDataResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
+	if err := assertAllBlocksHaveNames("data", list); err != nil {
+		return nil, err
+	}
+
+	list = list.Children()
+	if len(list.Items) == 0 {
+		return nil, nil
+	}
+
+	// Where all the results will go
+	var result []*Resource
+
+	// Now go over all the types and their children in order to get
+	// all of the actual resources.
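+	// (e.g. `data "aws_ami" "ubuntu" { ... }` arrives here with
+	// item.Keys holding the type "aws_ami" and the name "ubuntu";
+	// illustrative example.)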
+ for _, item := range list.Items { + if len(item.Keys) != 2 { + return nil, fmt.Errorf( + "position %s: 'data' must be followed by exactly two strings: a type and a name", + item.Pos()) + } + + t := item.Keys[0].Token.Value().(string) + k := item.Keys[1].Token.Value().(string) + + var listVal *ast.ObjectList + if ot, ok := item.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("data sources %s[%s]: should be an object", t, k) + } + + var config map[string]interface{} + if err := hcl.DecodeObject(&config, item.Val); err != nil { + return nil, fmt.Errorf( + "Error reading config for %s[%s]: %s", + t, + k, + err) + } + + // Remove the fields we handle specially + delete(config, "depends_on") + delete(config, "provider") + delete(config, "count") + + rawConfig, err := NewRawConfig(config) + if err != nil { + return nil, fmt.Errorf( + "Error reading config for %s[%s]: %s", + t, + k, + err) + } + + // If we have a count, then figure it out + var count string = "1" + if o := listVal.Filter("count"); len(o.Items) > 0 { + err = hcl.DecodeObject(&count, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error parsing count for %s[%s]: %s", + t, + k, + err) + } + } + countConfig, err := NewRawConfig(map[string]interface{}{ + "count": count, + }) + if err != nil { + return nil, err + } + countConfig.Key = "count" + + // If we have depends fields, then add those in + var dependsOn []string + if o := listVal.Filter("depends_on"); len(o.Items) > 0 { + err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading depends_on for %s[%s]: %s", + t, + k, + err) + } + } + + // If we have a provider, then parse it out + var provider string + if o := listVal.Filter("provider"); len(o.Items) > 0 { + err := hcl.DecodeObject(&provider, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading provider for %s[%s]: %s", + t, + k, + err) + } + } + + result = append(result, &Resource{ + Mode: DataResourceMode, + Name: k, + Type: t, + RawCount: countConfig, + RawConfig: rawConfig, + Provider: provider, + Provisioners: []*Provisioner{}, + DependsOn: dependsOn, + Lifecycle: ResourceLifecycle{}, + }) + } + + return result, nil +} + +// Given a handle to a HCL object, this recurses into the structure +// and pulls out a list of managed resources. +// +// The resulting resources may not be unique, but each resource +// represents exactly one "resource" block in the HCL configuration. +// We leave it up to another pass to merge them together. +func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { + list = list.Children() + if len(list.Items) == 0 { + return nil, nil + } + + // Where all the results will go + var result []*Resource + + // Now go over all the types and their children in order to get + // all of the actual resources. + for _, item := range list.Items { + // GH-4385: We detect a pure provisioner resource and give the user + // an error about how to do it cleanly. + if len(item.Keys) == 4 && item.Keys[2].Token.Value().(string) == "provisioner" { + return nil, fmt.Errorf( + "position %s: provisioners in a resource should be wrapped in a list\n\n"+ + "Example: \"provisioner\": [ { \"local-exec\": ... 
} ]", + item.Pos()) + } + + // Fix up JSON input + unwrapHCLObjectKeysFromJSON(item, 2) + + if len(item.Keys) != 2 { + return nil, fmt.Errorf( + "position %s: resource must be followed by exactly two strings, a type and a name", + item.Pos()) + } + + t := item.Keys[0].Token.Value().(string) + k := item.Keys[1].Token.Value().(string) + + var listVal *ast.ObjectList + if ot, ok := item.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("resources %s[%s]: should be an object", t, k) + } + + var config map[string]interface{} + if err := hcl.DecodeObject(&config, item.Val); err != nil { + return nil, fmt.Errorf( + "Error reading config for %s[%s]: %s", + t, + k, + err) + } + + // Remove the fields we handle specially + delete(config, "connection") + delete(config, "count") + delete(config, "depends_on") + delete(config, "provisioner") + delete(config, "provider") + delete(config, "lifecycle") + + rawConfig, err := NewRawConfig(config) + if err != nil { + return nil, fmt.Errorf( + "Error reading config for %s[%s]: %s", + t, + k, + err) + } + + // If we have a count, then figure it out + var count string = "1" + if o := listVal.Filter("count"); len(o.Items) > 0 { + err = hcl.DecodeObject(&count, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error parsing count for %s[%s]: %s", + t, + k, + err) + } + } + countConfig, err := NewRawConfig(map[string]interface{}{ + "count": count, + }) + if err != nil { + return nil, err + } + countConfig.Key = "count" + + // If we have depends fields, then add those in + var dependsOn []string + if o := listVal.Filter("depends_on"); len(o.Items) > 0 { + err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading depends_on for %s[%s]: %s", + t, + k, + err) + } + } + + // If we have connection info, then parse those out + var connInfo map[string]interface{} + if o := listVal.Filter("connection"); len(o.Items) > 0 { + err := hcl.DecodeObject(&connInfo, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading connection info for %s[%s]: %s", + t, + k, + err) + } + } + + // If we have provisioners, then parse those out + var provisioners []*Provisioner + if os := listVal.Filter("provisioner"); len(os.Items) > 0 { + var err error + provisioners, err = loadProvisionersHcl(os, connInfo) + if err != nil { + return nil, fmt.Errorf( + "Error reading provisioners for %s[%s]: %s", + t, + k, + err) + } + } + + // If we have a provider, then parse it out + var provider string + if o := listVal.Filter("provider"); len(o.Items) > 0 { + err := hcl.DecodeObject(&provider, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading provider for %s[%s]: %s", + t, + k, + err) + } + } + + // Check if the resource should be re-created before + // destroying the existing instance + var lifecycle ResourceLifecycle + if o := listVal.Filter("lifecycle"); len(o.Items) > 0 { + if len(o.Items) > 1 { + return nil, fmt.Errorf( + "%s[%s]: Multiple lifecycle blocks found, expected one", + t, k) + } + + // Check for invalid keys + valid := []string{"create_before_destroy", "ignore_changes", "prevent_destroy"} + if err := checkHCLKeys(o.Items[0].Val, valid); err != nil { + return nil, multierror.Prefix(err, fmt.Sprintf( + "%s[%s]:", t, k)) + } + + var raw map[string]interface{} + if err = hcl.DecodeObject(&raw, o.Items[0].Val); err != nil { + return nil, fmt.Errorf( + "Error parsing lifecycle for %s[%s]: %s", + t, + k, + err) + } + + if err := 
mapstructure.WeakDecode(raw, &lifecycle); err != nil { + return nil, fmt.Errorf( + "Error parsing lifecycle for %s[%s]: %s", + t, + k, + err) + } + } + + result = append(result, &Resource{ + Mode: ManagedResourceMode, + Name: k, + Type: t, + RawCount: countConfig, + RawConfig: rawConfig, + Provisioners: provisioners, + Provider: provider, + DependsOn: dependsOn, + Lifecycle: lifecycle, + }) + } + + return result, nil +} + +func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) { + if err := assertAllBlocksHaveNames("provisioner", list); err != nil { + return nil, err + } + + list = list.Children() + if len(list.Items) == 0 { + return nil, nil + } + + // Go through each object and turn it into an actual result. + result := make([]*Provisioner, 0, len(list.Items)) + for _, item := range list.Items { + n := item.Keys[0].Token.Value().(string) + + var listVal *ast.ObjectList + if ot, ok := item.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("provisioner '%s': should be an object", n) + } + + var config map[string]interface{} + if err := hcl.DecodeObject(&config, item.Val); err != nil { + return nil, err + } + + // Parse the "when" value + when := ProvisionerWhenCreate + if v, ok := config["when"]; ok { + switch v { + case "create": + when = ProvisionerWhenCreate + case "destroy": + when = ProvisionerWhenDestroy + default: + return nil, fmt.Errorf( + "position %s: 'provisioner' when must be 'create' or 'destroy'", + item.Pos()) + } + } + + // Parse the "on_failure" value + onFailure := ProvisionerOnFailureFail + if v, ok := config["on_failure"]; ok { + switch v { + case "continue": + onFailure = ProvisionerOnFailureContinue + case "fail": + onFailure = ProvisionerOnFailureFail + default: + return nil, fmt.Errorf( + "position %s: 'provisioner' on_failure must be 'continue' or 'fail'", + item.Pos()) + } + } + + // Delete fields we special case + delete(config, "connection") + delete(config, "when") + delete(config, "on_failure") + + rawConfig, err := NewRawConfig(config) + if err != nil { + return nil, err + } + + // Check if we have a provisioner-level connection + // block that overrides the resource-level + var subConnInfo map[string]interface{} + if o := listVal.Filter("connection"); len(o.Items) > 0 { + err := hcl.DecodeObject(&subConnInfo, o.Items[0].Val) + if err != nil { + return nil, err + } + } + + // Inherit from the resource connInfo any keys + // that are not explicitly overriden. + if connInfo != nil && subConnInfo != nil { + for k, v := range connInfo { + if _, ok := subConnInfo[k]; !ok { + subConnInfo[k] = v + } + } + } else if subConnInfo == nil { + subConnInfo = connInfo + } + + // Parse the connInfo + connRaw, err := NewRawConfig(subConnInfo) + if err != nil { + return nil, err + } + + result = append(result, &Provisioner{ + Type: n, + RawConfig: rawConfig, + ConnInfo: connRaw, + When: when, + OnFailure: onFailure, + }) + } + + return result, nil +} + +/* +func hclObjectMap(os *hclobj.Object) map[string]ast.ListNode { + objects := make(map[string][]*hclobj.Object) + + for _, o := range os.Elem(false) { + for _, elem := range o.Elem(true) { + val, ok := objects[elem.Key] + if !ok { + val = make([]*hclobj.Object, 0, 1) + } + + val = append(val, elem) + objects[elem.Key] = val + } + } + + return objects +} +*/ + +// assertAllBlocksHaveNames returns an error if any of the items in +// the given object list are blocks without keys (like "module {}") +// or simple assignments (like "module = 1"). 
It returns nil if +// neither of these things are true. +// +// The given name is used in any generated error messages, and should +// be the name of the block we're dealing with. The given list should +// be the result of calling .Filter on an object list with that same +// name. +func assertAllBlocksHaveNames(name string, list *ast.ObjectList) error { + if elem := list.Elem(); len(elem.Items) != 0 { + switch et := elem.Items[0].Val.(type) { + case *ast.ObjectType: + pos := et.Lbrace + return fmt.Errorf("%s: %q must be followed by a name", pos, name) + default: + pos := elem.Items[0].Val.Pos() + return fmt.Errorf("%s: %q must be a configuration block", pos, name) + } + } + return nil +} + +func checkHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) + } + + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} + } + + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf( + "invalid key: %s", key)) + } + } + + return result +} + +// unwrapHCLObjectKeysFromJSON cleans up an edge case that can occur when +// parsing JSON as input: if we're parsing JSON then directly nested +// items will show up as additional "keys". +// +// For objects that expect a fixed number of keys, this breaks the +// decoding process. This function unwraps the object into what it would've +// looked like if it came directly from HCL by specifying the number of keys +// you expect. +// +// Example: +// +// { "foo": { "baz": {} } } +// +// Will show up with Keys being: []string{"foo", "baz"} +// when we really just want the first two. This function will fix this. +func unwrapHCLObjectKeysFromJSON(item *ast.ObjectItem, depth int) { + if len(item.Keys) > depth && item.Keys[0].Token.JSON { + for len(item.Keys) > depth { + // Pop off the last key + n := len(item.Keys) + key := item.Keys[n-1] + item.Keys[n-1] = nil + item.Keys = item.Keys[:n-1] + + // Wrap our value in a list + item.Val = &ast.ObjectType{ + List: &ast.ObjectList{ + Items: []*ast.ObjectItem{ + &ast.ObjectItem{ + Keys: []*ast.ObjectKey{key}, + Val: item.Val, + }, + }, + }, + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go new file mode 100644 index 00000000..db214be4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/merge.go @@ -0,0 +1,193 @@ +package config + +// Merge merges two configurations into a single configuration. +// +// Merge allows for the two configurations to have duplicate resources, +// because the resources will be merged. This differs from a single +// Config which must only have unique resources. +func Merge(c1, c2 *Config) (*Config, error) { + c := new(Config) + + // Merge unknown keys + unknowns := make(map[string]struct{}) + for _, k := range c1.unknownKeys { + _, present := unknowns[k] + if !present { + unknowns[k] = struct{}{} + c.unknownKeys = append(c.unknownKeys, k) + } + } + for _, k := range c2.unknownKeys { + _, present := unknowns[k] + if !present { + unknowns[k] = struct{}{} + c.unknownKeys = append(c.unknownKeys, k) + } + } + + // Merge Atlas configuration. This is a dumb one overrides the other + // sort of merge. 
+ c.Atlas = c1.Atlas + if c2.Atlas != nil { + c.Atlas = c2.Atlas + } + + // Merge the Terraform configuration + if c1.Terraform != nil { + c.Terraform = c1.Terraform + if c2.Terraform != nil { + c.Terraform.Merge(c2.Terraform) + } + } else { + c.Terraform = c2.Terraform + } + + // NOTE: Everything below is pretty gross. Due to the lack of generics + // in Go, there is some hoop-jumping involved to make this merging a + // little more test-friendly and less repetitive. Ironically, making it + // less repetitive involves being a little repetitive, but I prefer to + // be repetitive with things that are less error prone than things that + // are more error prone (more logic). Type conversions to an interface + // are pretty low-error. + + var m1, m2, mresult []merger + + // Modules + m1 = make([]merger, 0, len(c1.Modules)) + m2 = make([]merger, 0, len(c2.Modules)) + for _, v := range c1.Modules { + m1 = append(m1, v) + } + for _, v := range c2.Modules { + m2 = append(m2, v) + } + mresult = mergeSlice(m1, m2) + if len(mresult) > 0 { + c.Modules = make([]*Module, len(mresult)) + for i, v := range mresult { + c.Modules[i] = v.(*Module) + } + } + + // Outputs + m1 = make([]merger, 0, len(c1.Outputs)) + m2 = make([]merger, 0, len(c2.Outputs)) + for _, v := range c1.Outputs { + m1 = append(m1, v) + } + for _, v := range c2.Outputs { + m2 = append(m2, v) + } + mresult = mergeSlice(m1, m2) + if len(mresult) > 0 { + c.Outputs = make([]*Output, len(mresult)) + for i, v := range mresult { + c.Outputs[i] = v.(*Output) + } + } + + // Provider Configs + m1 = make([]merger, 0, len(c1.ProviderConfigs)) + m2 = make([]merger, 0, len(c2.ProviderConfigs)) + for _, v := range c1.ProviderConfigs { + m1 = append(m1, v) + } + for _, v := range c2.ProviderConfigs { + m2 = append(m2, v) + } + mresult = mergeSlice(m1, m2) + if len(mresult) > 0 { + c.ProviderConfigs = make([]*ProviderConfig, len(mresult)) + for i, v := range mresult { + c.ProviderConfigs[i] = v.(*ProviderConfig) + } + } + + // Resources + m1 = make([]merger, 0, len(c1.Resources)) + m2 = make([]merger, 0, len(c2.Resources)) + for _, v := range c1.Resources { + m1 = append(m1, v) + } + for _, v := range c2.Resources { + m2 = append(m2, v) + } + mresult = mergeSlice(m1, m2) + if len(mresult) > 0 { + c.Resources = make([]*Resource, len(mresult)) + for i, v := range mresult { + c.Resources[i] = v.(*Resource) + } + } + + // Variables + m1 = make([]merger, 0, len(c1.Variables)) + m2 = make([]merger, 0, len(c2.Variables)) + for _, v := range c1.Variables { + m1 = append(m1, v) + } + for _, v := range c2.Variables { + m2 = append(m2, v) + } + mresult = mergeSlice(m1, m2) + if len(mresult) > 0 { + c.Variables = make([]*Variable, len(mresult)) + for i, v := range mresult { + c.Variables[i] = v.(*Variable) + } + } + + return c, nil +} + +// merger is an interface that must be implemented by types that are +// merge-able. This simplifies the implementation of Merge for the various +// components of a Config. +type merger interface { + mergerName() string + mergerMerge(merger) merger +} + +// mergeSlice merges a slice of mergers. +func mergeSlice(m1, m2 []merger) []merger { + r := make([]merger, len(m1), len(m1)+len(m2)) + copy(r, m1) + + m := map[string]struct{}{} + for _, v2 := range m2 { + // If we already saw it, just append it because its a + // duplicate and invalid... 
+		name := v2.mergerName()
+		if _, ok := m[name]; ok {
+			r = append(r, v2)
+			continue
+		}
+		m[name] = struct{}{}
+
+		// Find an original to override
+		var original merger
+		originalIndex := -1
+		for i, v := range m1 {
+			if v.mergerName() == name {
+				originalIndex = i
+				original = v
+				break
+			}
+		}
+
+		var v merger
+		if original == nil {
+			v = v2
+		} else {
+			v = original.mergerMerge(v2)
+		}
+
+		if originalIndex == -1 {
+			r = append(r, v)
+		} else {
+			r[originalIndex] = v
+		}
+	}
+
+	return r
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go b/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go
new file mode 100644
index 00000000..095f61d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go
@@ -0,0 +1,114 @@
+package module
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// copyDir copies the src directory contents into dst. Both directories
+// should already exist.
+func copyDir(dst, src string) error {
+	src, err := filepath.EvalSymlinks(src)
+	if err != nil {
+		return err
+	}
+
+	walkFn := func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if path == src {
+			return nil
+		}
+
+		if strings.HasPrefix(filepath.Base(path), ".") {
+			// Skip any dot files
+			if info.IsDir() {
+				return filepath.SkipDir
+			} else {
+				return nil
+			}
+		}
+
+		// The "path" has the src prefixed to it. We need to join our
+		// destination with the path without the src on it.
+		dstPath := filepath.Join(dst, path[len(src):])
+
+		// we don't want to try and copy the same file over itself.
+		if eq, err := sameFile(path, dstPath); eq {
+			return nil
+		} else if err != nil {
+			return err
+		}
+
+		// If we have a directory, make that subdirectory, then continue
+		// the walk.
+		if info.IsDir() {
+			if path == filepath.Join(src, dst) {
+				// dst is in src; don't walk it.
+				return nil
+			}
+
+			if err := os.MkdirAll(dstPath, 0755); err != nil {
+				return err
+			}
+
+			return nil
+		}
+
+		// If we have a file, copy the contents.
+		srcF, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		dstF, err := os.Create(dstPath)
+		if err != nil {
+			return err
+		}
+		defer dstF.Close()
+
+		if _, err := io.Copy(dstF, srcF); err != nil {
+			return err
+		}
+
+		// Chmod it
+		return os.Chmod(dstPath, info.Mode())
+	}
+
+	return filepath.Walk(src, walkFn)
+}
+
+// sameFile tries to determine if two paths refer to the same file.
+// If the paths don't match, we look up the inode on supported systems.
+func sameFile(a, b string) (bool, error) {
+	if a == b {
+		return true, nil
+	}
+
+	aIno, err := inode(a)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+
+	bIno, err := inode(b)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+
+	if aIno > 0 && aIno == bIno {
+		return true, nil
+	}
+
+	return false, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/get.go b/vendor/github.com/hashicorp/terraform/config/module/get.go
new file mode 100644
index 00000000..96b4a63c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/get.go
@@ -0,0 +1,71 @@
+package module
+
+import (
+	"io/ioutil"
+	"os"
+
+	"github.com/hashicorp/go-getter"
+)
+
+// GetMode is an enum that describes how modules are loaded.
+//
+// GetModeLoad (the GetModeNone constant below) says that modules will not
+// be downloaded or updated; they will only be loaded from storage.
+// +// GetModeGet says that modules can be initially downloaded if they don't +// exist, but otherwise to just load from the current version in storage. +// +// GetModeUpdate says that modules should be checked for updates and +// downloaded prior to loading. If there are no updates, we load the version +// from disk, otherwise we download first and then load. +type GetMode byte + +const ( + GetModeNone GetMode = iota + GetModeGet + GetModeUpdate +) + +// GetCopy is the same as Get except that it downloads a copy of the +// module represented by source. +// +// This copy will omit and dot-prefixed files (such as .git/, .hg/) and +// can't be updated on its own. +func GetCopy(dst, src string) error { + // Create the temporary directory to do the real Get to + tmpDir, err := ioutil.TempDir("", "tf") + if err != nil { + return err + } + // FIXME: This isn't completely safe. Creating and removing our temp path + // exposes where to race to inject files. + if err := os.RemoveAll(tmpDir); err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + // Get to that temporary dir + if err := getter.Get(tmpDir, src); err != nil { + return err + } + + // Make sure the destination exists + if err := os.MkdirAll(dst, 0755); err != nil { + return err + } + + // Copy to the final location + return copyDir(dst, tmpDir) +} + +func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) { + // Get the module with the level specified if we were told to. + if mode > GetModeNone { + if err := s.Get(key, src, mode == GetModeUpdate); err != nil { + return "", false, err + } + } + + // Get the directory where the module is. + return s.Dir(key) +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode.go b/vendor/github.com/hashicorp/terraform/config/module/inode.go new file mode 100644 index 00000000..8603ee26 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/inode.go @@ -0,0 +1,21 @@ +// +build linux darwin openbsd netbsd solaris + +package module + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return st.Ino, nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go new file mode 100644 index 00000000..0d95730d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go @@ -0,0 +1,21 @@ +// +build freebsd + +package module + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return uint64(st.Ino), nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go b/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go new file mode 100644 index 00000000..c0cf4553 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package module + +// no syscall.Stat_t on windows, return 0 for inodes +func inode(path string) (uint64, error) { + return 0, nil +} diff --git 
a/vendor/github.com/hashicorp/terraform/config/module/module.go b/vendor/github.com/hashicorp/terraform/config/module/module.go new file mode 100644 index 00000000..f8649f6e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/module.go @@ -0,0 +1,7 @@ +package module + +// Module represents the metadata for a single module. +type Module struct { + Name string + Source string +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree.go b/vendor/github.com/hashicorp/terraform/config/module/tree.go new file mode 100644 index 00000000..b6f90fd9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/tree.go @@ -0,0 +1,428 @@ +package module + +import ( + "bufio" + "bytes" + "fmt" + "path/filepath" + "strings" + "sync" + + "github.com/hashicorp/go-getter" + "github.com/hashicorp/terraform/config" +) + +// RootName is the name of the root tree. +const RootName = "root" + +// Tree represents the module import tree of configurations. +// +// This Tree structure can be used to get (download) new modules, load +// all the modules without getting, flatten the tree into something +// Terraform can use, etc. +type Tree struct { + name string + config *config.Config + children map[string]*Tree + path []string + lock sync.RWMutex +} + +// NewTree returns a new Tree for the given config structure. +func NewTree(name string, c *config.Config) *Tree { + return &Tree{config: c, name: name} +} + +// NewEmptyTree returns a new tree that is empty (contains no configuration). +func NewEmptyTree() *Tree { + t := &Tree{config: &config.Config{}} + + // We do this dummy load so that the tree is marked as "loaded". It + // should never fail because this is just about a no-op. If it does fail + // we panic so we can know its a bug. + if err := t.Load(nil, GetModeGet); err != nil { + panic(err) + } + + return t +} + +// NewTreeModule is like NewTree except it parses the configuration in +// the directory and gives it a specific name. Use a blank name "" to specify +// the root module. +func NewTreeModule(name, dir string) (*Tree, error) { + c, err := config.LoadDir(dir) + if err != nil { + return nil, err + } + + return NewTree(name, c), nil +} + +// Config returns the configuration for this module. +func (t *Tree) Config() *config.Config { + return t.config +} + +// Child returns the child with the given path (by name). +func (t *Tree) Child(path []string) *Tree { + if t == nil { + return nil + } + + if len(path) == 0 { + return t + } + + c := t.Children()[path[0]] + if c == nil { + return nil + } + + return c.Child(path[1:]) +} + +// Children returns the children of this tree (the modules that are +// imported by this root). +// +// This will only return a non-nil value after Load is called. +func (t *Tree) Children() map[string]*Tree { + t.lock.RLock() + defer t.lock.RUnlock() + return t.children +} + +// Loaded says whether or not this tree has been loaded or not yet. +func (t *Tree) Loaded() bool { + t.lock.RLock() + defer t.lock.RUnlock() + return t.children != nil +} + +// Modules returns the list of modules that this tree imports. +// +// This is only the imports of _this_ level of the tree. To retrieve the +// full nested imports, you'll have to traverse the tree. +func (t *Tree) Modules() []*Module { + result := make([]*Module, len(t.config.Modules)) + for i, m := range t.config.Modules { + result[i] = &Module{ + Name: m.Name, + Source: m.Source, + } + } + + return result +} + +// Name returns the name of the tree. 
This will be "" for the root +// tree and then the module name given for any children. +func (t *Tree) Name() string { + if t.name == "" { + return RootName + } + + return t.name +} + +// Load loads the configuration of the entire tree. +// +// The parameters are used to tell the tree where to find modules and +// whether it can download/update modules along the way. +// +// Calling this multiple times will reload the tree. +// +// Various semantic-like checks are made along the way of loading since +// module trees inherently require the configuration to be in a reasonably +// sane state: no circular dependencies, proper module sources, etc. A full +// suite of validations can be done by running Validate (after loading). +func (t *Tree) Load(s getter.Storage, mode GetMode) error { + t.lock.Lock() + defer t.lock.Unlock() + + // Reset the children if we have any + t.children = nil + + modules := t.Modules() + children := make(map[string]*Tree) + + // Go through all the modules and get the directory for them. + for _, m := range modules { + if _, ok := children[m.Name]; ok { + return fmt.Errorf( + "module %s: duplicated. module names must be unique", m.Name) + } + + // Determine the path to this child + path := make([]string, len(t.path), len(t.path)+1) + copy(path, t.path) + path = append(path, m.Name) + + // Split out the subdir if we have one + source, subDir := getter.SourceDirSubdir(m.Source) + + source, err := getter.Detect(source, t.config.Dir, getter.Detectors) + if err != nil { + return fmt.Errorf("module %s: %s", m.Name, err) + } + + // Check if the detector introduced something new. + source, subDir2 := getter.SourceDirSubdir(source) + if subDir2 != "" { + subDir = filepath.Join(subDir2, subDir) + } + + // Get the directory where this module is so we can load it + key := strings.Join(path, ".") + key = fmt.Sprintf("root.%s-%s", key, m.Source) + dir, ok, err := getStorage(s, key, source, mode) + if err != nil { + return err + } + if !ok { + return fmt.Errorf( + "module %s: not found, may need to be downloaded using 'terraform get'", m.Name) + } + + // If we have a subdirectory, then merge that in + if subDir != "" { + dir = filepath.Join(dir, subDir) + } + + // Load the configurations.Dir(source) + children[m.Name], err = NewTreeModule(m.Name, dir) + if err != nil { + return fmt.Errorf( + "module %s: %s", m.Name, err) + } + + // Set the path of this child + children[m.Name].path = path + } + + // Go through all the children and load them. + for _, c := range children { + if err := c.Load(s, mode); err != nil { + return err + } + } + + // Set our tree up + t.children = children + + return nil +} + +// Path is the full path to this tree. +func (t *Tree) Path() []string { + return t.path +} + +// String gives a nice output to describe the tree. +func (t *Tree) String() string { + var result bytes.Buffer + path := strings.Join(t.path, ", ") + if path != "" { + path = fmt.Sprintf(" (path: %s)", path) + } + result.WriteString(t.Name() + path + "\n") + + cs := t.Children() + if cs == nil { + result.WriteString(" not loaded") + } else { + // Go through each child and get its string value, then indent it + // by two. + for _, c := range cs { + r := strings.NewReader(c.String()) + scanner := bufio.NewScanner(r) + for scanner.Scan() { + result.WriteString(" ") + result.WriteString(scanner.Text()) + result.WriteString("\n") + } + } + } + + return result.String() +} + +// Validate does semantic checks on the entire tree of configurations. 
+// +// This will call the respective config.Config.Validate() functions as well +// as verifying things such as parameters/outputs between the various modules. +// +// Load must be called prior to calling Validate or an error will be returned. +func (t *Tree) Validate() error { + if !t.Loaded() { + return fmt.Errorf("tree must be loaded before calling Validate") + } + + // If something goes wrong, here is our error template + newErr := &treeError{Name: []string{t.Name()}} + + // Terraform core does not handle root module children named "root". + // We plan to fix this in the future but this bug was brought up in + // the middle of a release and we don't want to introduce wide-sweeping + // changes at that time. + if len(t.path) == 1 && t.name == "root" { + return fmt.Errorf("root module cannot contain module named 'root'") + } + + // Validate our configuration first. + if err := t.config.Validate(); err != nil { + newErr.Add(err) + } + + // If we're the root, we do extra validation. This validation usually + // requires the entire tree (since children don't have parent pointers). + if len(t.path) == 0 { + if err := t.validateProviderAlias(); err != nil { + newErr.Add(err) + } + } + + // Get the child trees + children := t.Children() + + // Validate all our children + for _, c := range children { + err := c.Validate() + if err == nil { + continue + } + + verr, ok := err.(*treeError) + if !ok { + // Unknown error, just return... + return err + } + + // Append ourselves to the error and then return + verr.Name = append(verr.Name, t.Name()) + newErr.AddChild(verr) + } + + // Go over all the modules and verify that any parameters are valid + // variables into the module in question. + for _, m := range t.config.Modules { + tree, ok := children[m.Name] + if !ok { + // This should never happen because Load watches us + panic("module not found in children: " + m.Name) + } + + // Build the variables that the module defines + requiredMap := make(map[string]struct{}) + varMap := make(map[string]struct{}) + for _, v := range tree.config.Variables { + varMap[v.Name] = struct{}{} + + if v.Required() { + requiredMap[v.Name] = struct{}{} + } + } + + // Compare to the keys in our raw config for the module + for k, _ := range m.RawConfig.Raw { + if _, ok := varMap[k]; !ok { + newErr.Add(fmt.Errorf( + "module %s: %s is not a valid parameter", + m.Name, k)) + } + + // Remove the required + delete(requiredMap, k) + } + + // If we have any required left over, they aren't set. + for k, _ := range requiredMap { + newErr.Add(fmt.Errorf( + "module %s: required variable %q not set", + m.Name, k)) + } + } + + // Go over all the variables used and make sure that any module + // variables represent outputs properly. + for source, vs := range t.config.InterpolatedVariables() { + for _, v := range vs { + mv, ok := v.(*config.ModuleVariable) + if !ok { + continue + } + + tree, ok := children[mv.Name] + if !ok { + newErr.Add(fmt.Errorf( + "%s: undefined module referenced %s", + source, mv.Name)) + continue + } + + found := false + for _, o := range tree.config.Outputs { + if o.Name == mv.Field { + found = true + break + } + } + if !found { + newErr.Add(fmt.Errorf( + "%s: %s is not a valid output for module %s", + source, mv.Field, mv.Name)) + } + } + } + + return newErr.ErrOrNil() +} + +// treeError is an error use by Tree.Validate to accumulates all +// validation errors. 
+type treeError struct { + Name []string + Errs []error + Children []*treeError +} + +func (e *treeError) Add(err error) { + e.Errs = append(e.Errs, err) +} + +func (e *treeError) AddChild(err *treeError) { + e.Children = append(e.Children, err) +} + +func (e *treeError) ErrOrNil() error { + if len(e.Errs) > 0 || len(e.Children) > 0 { + return e + } + return nil +} + +func (e *treeError) Error() string { + name := strings.Join(e.Name, ".") + var out bytes.Buffer + fmt.Fprintf(&out, "module %s: ", name) + + if len(e.Errs) == 1 { + // single like error + out.WriteString(e.Errs[0].Error()) + } else { + // multi-line error + for _, err := range e.Errs { + fmt.Fprintf(&out, "\n %s", err) + } + } + + if len(e.Children) > 0 { + // start the next error on a new line + out.WriteString("\n ") + } + for _, child := range e.Children { + out.WriteString(child.Error()) + } + + return out.String() +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go b/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go new file mode 100644 index 00000000..fcd37f4e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go @@ -0,0 +1,57 @@ +package module + +import ( + "bytes" + "encoding/gob" + + "github.com/hashicorp/terraform/config" +) + +func (t *Tree) GobDecode(bs []byte) error { + t.lock.Lock() + defer t.lock.Unlock() + + // Decode the gob data + var data treeGob + dec := gob.NewDecoder(bytes.NewReader(bs)) + if err := dec.Decode(&data); err != nil { + return err + } + + // Set the fields + t.name = data.Name + t.config = data.Config + t.children = data.Children + t.path = data.Path + + return nil +} + +func (t *Tree) GobEncode() ([]byte, error) { + data := &treeGob{ + Config: t.config, + Children: t.children, + Name: t.name, + Path: t.path, + } + + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + if err := enc.Encode(data); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// treeGob is used as a structure to Gob encode a tree. +// +// This structure is private so it can't be referenced but the fields are +// public, allowing Gob to properly encode this. When we decode this, we are +// able to turn it into a Tree. +type treeGob struct { + Config *config.Config + Children map[string]*Tree + Name string + Path []string +} diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go new file mode 100644 index 00000000..f8498d85 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/raw_config.go @@ -0,0 +1,335 @@ +package config + +import ( + "bytes" + "encoding/gob" + "sync" + + "github.com/hashicorp/hil" + "github.com/hashicorp/hil/ast" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/reflectwalk" +) + +// UnknownVariableValue is a sentinel value that can be used +// to denote that the value of a variable is unknown at this time. +// RawConfig uses this information to build up data about +// unknown keys. +const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +// RawConfig is a structure that holds a piece of configuration +// where the overall structure is unknown since it will be used +// to configure a plugin or some other similar external component. +// +// RawConfigs can be interpolated with variables that come from +// other resources, user variables, etc. +// +// RawConfig supports a query-like interface to request +// information from deep within the structure. 
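+//
+// A minimal usage sketch (hypothetical values, errors elided):
+//
+//	rc, _ := NewRawConfig(map[string]interface{}{
+//		"ami": "${var.ami}",
+//	})
+//	// rc.Variables should now have an entry keyed "var.ami", and
+//	// calling rc.Interpolate with a value for it fills in rc.Config().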
+type RawConfig struct { + Key string + Raw map[string]interface{} + Interpolations []ast.Node + Variables map[string]InterpolatedVariable + + lock sync.Mutex + config map[string]interface{} + unknownKeys []string +} + +// NewRawConfig creates a new RawConfig structure and populates the +// publicly readable struct fields. +func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) { + result := &RawConfig{Raw: raw} + if err := result.init(); err != nil { + return nil, err + } + + return result, nil +} + +// RawMap returns a copy of the RawConfig.Raw map. +func (r *RawConfig) RawMap() map[string]interface{} { + r.lock.Lock() + defer r.lock.Unlock() + + m := make(map[string]interface{}) + for k, v := range r.Raw { + m[k] = v + } + return m +} + +// Copy returns a copy of this RawConfig, uninterpolated. +func (r *RawConfig) Copy() *RawConfig { + if r == nil { + return nil + } + + r.lock.Lock() + defer r.lock.Unlock() + + newRaw := make(map[string]interface{}) + for k, v := range r.Raw { + newRaw[k] = v + } + + result, err := NewRawConfig(newRaw) + if err != nil { + panic("copy failed: " + err.Error()) + } + + result.Key = r.Key + return result +} + +// Value returns the value of the configuration if this configuration +// has a Key set. If this does not have a Key set, nil will be returned. +func (r *RawConfig) Value() interface{} { + if c := r.Config(); c != nil { + if v, ok := c[r.Key]; ok { + return v + } + } + + r.lock.Lock() + defer r.lock.Unlock() + return r.Raw[r.Key] +} + +// Config returns the entire configuration with the variables +// interpolated from any call to Interpolate. +// +// If any interpolated variables are unknown (value set to +// UnknownVariableValue), the first non-container (map, slice, etc.) element +// will be removed from the config. The keys of unknown variables +// can be found using the UnknownKeys function. +// +// By pruning out unknown keys from the configuration, the raw +// structure will always successfully decode into its ultimate +// structure using something like mapstructure. +func (r *RawConfig) Config() map[string]interface{} { + r.lock.Lock() + defer r.lock.Unlock() + return r.config +} + +// Interpolate uses the given mapping of variable values and uses +// those as the values to replace any variables in this raw +// configuration. +// +// Any prior calls to Interpolate are replaced with this one. +// +// If a variable key is missing, this will panic. +func (r *RawConfig) Interpolate(vs map[string]ast.Variable) error { + r.lock.Lock() + defer r.lock.Unlock() + + config := langEvalConfig(vs) + return r.interpolate(func(root ast.Node) (interface{}, error) { + // None of the variables we need are computed, meaning we should + // be able to properly evaluate. + result, err := hil.Eval(root, config) + if err != nil { + return "", err + } + + return result.Value, nil + }) +} + +// Merge merges another RawConfig into this one (overriding any conflicting +// values in this config) and returns a new config. The original config +// is not modified. 
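+//
+// Illustrative, given the implementation below:
+//
+//	a, _ := NewRawConfig(map[string]interface{}{"x": "1", "y": "2"})
+//	b, _ := NewRawConfig(map[string]interface{}{"y": "3"})
+//	merged := a.Merge(b)
+//	// merged.Raw is {"x": "1", "y": "3"}; a.Raw is unchanged.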
+func (r *RawConfig) Merge(other *RawConfig) *RawConfig { + r.lock.Lock() + defer r.lock.Unlock() + + // Merge the raw configurations + raw := make(map[string]interface{}) + for k, v := range r.Raw { + raw[k] = v + } + for k, v := range other.Raw { + raw[k] = v + } + + // Create the result + result, err := NewRawConfig(raw) + if err != nil { + panic(err) + } + + // Merge the interpolated results + result.config = make(map[string]interface{}) + for k, v := range r.config { + result.config[k] = v + } + for k, v := range other.config { + result.config[k] = v + } + + // Build the unknown keys + if len(r.unknownKeys) > 0 || len(other.unknownKeys) > 0 { + unknownKeys := make(map[string]struct{}) + for _, k := range r.unknownKeys { + unknownKeys[k] = struct{}{} + } + for _, k := range other.unknownKeys { + unknownKeys[k] = struct{}{} + } + + result.unknownKeys = make([]string, 0, len(unknownKeys)) + for k, _ := range unknownKeys { + result.unknownKeys = append(result.unknownKeys, k) + } + } + + return result +} + +func (r *RawConfig) init() error { + r.lock.Lock() + defer r.lock.Unlock() + + r.config = r.Raw + r.Interpolations = nil + r.Variables = nil + + fn := func(node ast.Node) (interface{}, error) { + r.Interpolations = append(r.Interpolations, node) + vars, err := DetectVariables(node) + if err != nil { + return "", err + } + + for _, v := range vars { + if r.Variables == nil { + r.Variables = make(map[string]InterpolatedVariable) + } + + r.Variables[v.FullKey()] = v + } + + return "", nil + } + + walker := &interpolationWalker{F: fn} + if err := reflectwalk.Walk(r.Raw, walker); err != nil { + return err + } + + return nil +} + +func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error { + config, err := copystructure.Copy(r.Raw) + if err != nil { + return err + } + r.config = config.(map[string]interface{}) + + w := &interpolationWalker{F: fn, Replace: true} + err = reflectwalk.Walk(r.config, w) + if err != nil { + return err + } + + r.unknownKeys = w.unknownKeys + return nil +} + +func (r *RawConfig) merge(r2 *RawConfig) *RawConfig { + if r == nil && r2 == nil { + return nil + } + + if r == nil { + r = &RawConfig{} + } + + rawRaw, err := copystructure.Copy(r.Raw) + if err != nil { + panic(err) + } + + raw := rawRaw.(map[string]interface{}) + if r2 != nil { + for k, v := range r2.Raw { + raw[k] = v + } + } + + result, err := NewRawConfig(raw) + if err != nil { + panic(err) + } + + return result +} + +// UnknownKeys returns the keys of the configuration that are unknown +// because they had interpolated variables that must be computed. +func (r *RawConfig) UnknownKeys() []string { + r.lock.Lock() + defer r.lock.Unlock() + return r.unknownKeys +} + +// See GobEncode +func (r *RawConfig) GobDecode(b []byte) error { + var data gobRawConfig + err := gob.NewDecoder(bytes.NewReader(b)).Decode(&data) + if err != nil { + return err + } + + r.Key = data.Key + r.Raw = data.Raw + + return r.init() +} + +// GobEncode is a custom Gob encoder to use so that we only include the +// raw configuration. Interpolated variables and such are lost and the +// tree of interpolated variables is recomputed on decode, since it is +// referentially transparent. 
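+//
+// Round-trip sketch (illustrative; error handling elided):
+//
+//	var buf bytes.Buffer
+//	_ = gob.NewEncoder(&buf).Encode(rc)
+//
+//	var out RawConfig
+//	_ = gob.NewDecoder(&buf).Decode(&out)
+//	// out.Raw equals rc.Raw; interpolations are re-detected by init().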
+func (r *RawConfig) GobEncode() ([]byte, error) { + r.lock.Lock() + defer r.lock.Unlock() + + data := gobRawConfig{ + Key: r.Key, + Raw: r.Raw, + } + + var buf bytes.Buffer + if err := gob.NewEncoder(&buf).Encode(data); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type gobRawConfig struct { + Key string + Raw map[string]interface{} +} + +// langEvalConfig returns the evaluation configuration we use to execute. +func langEvalConfig(vs map[string]ast.Variable) *hil.EvalConfig { + funcMap := make(map[string]ast.Function) + for k, v := range Funcs() { + funcMap[k] = v + } + funcMap["lookup"] = interpolationFuncLookup(vs) + funcMap["keys"] = interpolationFuncKeys(vs) + funcMap["values"] = interpolationFuncValues(vs) + + return &hil.EvalConfig{ + GlobalScope: &ast.BasicScope{ + VarMap: vs, + FuncMap: funcMap, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode.go b/vendor/github.com/hashicorp/terraform/config/resource_mode.go new file mode 100644 index 00000000..877c6e84 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/resource_mode.go @@ -0,0 +1,9 @@ +package config + +//go:generate stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go +type ResourceMode int + +const ( + ManagedResourceMode ResourceMode = iota + DataResourceMode +) diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go new file mode 100644 index 00000000..ea68b4fc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT. + +package config + +import "fmt" + +const _ResourceMode_name = "ManagedResourceModeDataResourceMode" + +var _ResourceMode_index = [...]uint8{0, 19, 35} + +func (i ResourceMode) String() string { + if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) { + return fmt.Sprintf("ResourceMode(%d)", i) + } + return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go new file mode 100644 index 00000000..f8776bc5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/dag/dag.go @@ -0,0 +1,286 @@ +package dag + +import ( + "fmt" + "sort" + "strings" + + "github.com/hashicorp/go-multierror" +) + +// AcyclicGraph is a specialization of Graph that cannot have cycles. With +// this property, we get the property of sane graph traversal. +type AcyclicGraph struct { + Graph +} + +// WalkFunc is the callback used for walking the graph. +type WalkFunc func(Vertex) error + +// DepthWalkFunc is a walk function that also receives the current depth of the +// walk as an argument +type DepthWalkFunc func(Vertex, int) error + +func (g *AcyclicGraph) DirectedGraph() Grapher { + return g +} + +// Returns a Set that includes every Vertex yielded by walking down from the +// provided starting Vertex v. +func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) { + s := new(Set) + start := AsVertexList(g.DownEdges(v)) + memoFunc := func(v Vertex, d int) error { + s.Add(v) + return nil + } + + if err := g.DepthFirstWalk(start, memoFunc); err != nil { + return nil, err + } + + return s, nil +} + +// Returns a Set that includes every Vertex yielded by walking up from the +// provided starting Vertex v. 
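+//
+// Note the direction relative to the edge orientation (illustrative):
+// for a graph with the single edge A -> B, Descendents(B) walks UpEdges
+// and yields {A}, while Ancestors(A) walks DownEdges and yields {B}.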
+func (g *AcyclicGraph) Descendents(v Vertex) (*Set, error) { + s := new(Set) + start := AsVertexList(g.UpEdges(v)) + memoFunc := func(v Vertex, d int) error { + s.Add(v) + return nil + } + + if err := g.ReverseDepthFirstWalk(start, memoFunc); err != nil { + return nil, err + } + + return s, nil +} + +// Root returns the root of the DAG, or an error. +// +// Complexity: O(V) +func (g *AcyclicGraph) Root() (Vertex, error) { + roots := make([]Vertex, 0, 1) + for _, v := range g.Vertices() { + if g.UpEdges(v).Len() == 0 { + roots = append(roots, v) + } + } + + if len(roots) > 1 { + // TODO(mitchellh): make this error message a lot better + return nil, fmt.Errorf("multiple roots: %#v", roots) + } + + if len(roots) == 0 { + return nil, fmt.Errorf("no roots found") + } + + return roots[0], nil +} + +// TransitiveReduction performs the transitive reduction of graph g in place. +// The transitive reduction of a graph is a graph with as few edges as +// possible with the same reachability as the original graph. This means +// that if there are three nodes A => B => C, and A connects to both +// B and C, and B connects to C, then the transitive reduction is the +// same graph with only a single edge between A and B, and a single edge +// between B and C. +// +// The graph must be valid for this operation to behave properly. If +// Validate() returns an error, the behavior is undefined and the results +// will likely be unexpected. +// +// Complexity: O(V(V+E)), or asymptotically O(VE) +func (g *AcyclicGraph) TransitiveReduction() { + // For each vertex u in graph g, do a DFS starting from each vertex + // v such that the edge (u,v) exists (v is a direct descendant of u). + // + // For each v-prime reachable from v, remove the edge (u, v-prime). + defer g.debug.BeginOperation("TransitiveReduction", "").End("") + + for _, u := range g.Vertices() { + uTargets := g.DownEdges(u) + vs := AsVertexList(g.DownEdges(u)) + + g.DepthFirstWalk(vs, func(v Vertex, d int) error { + shared := uTargets.Intersection(g.DownEdges(v)) + for _, vPrime := range AsVertexList(shared) { + g.RemoveEdge(BasicEdge(u, vPrime)) + } + + return nil + }) + } +} + +// Validate validates the DAG. A DAG is valid if it has a single root +// with no cycles. +func (g *AcyclicGraph) Validate() error { + if _, err := g.Root(); err != nil { + return err + } + + // Look for cycles of more than 1 component + var err error + cycles := g.Cycles() + if len(cycles) > 0 { + for _, cycle := range cycles { + cycleStr := make([]string, len(cycle)) + for j, vertex := range cycle { + cycleStr[j] = VertexName(vertex) + } + + err = multierror.Append(err, fmt.Errorf( + "Cycle: %s", strings.Join(cycleStr, ", "))) + } + } + + // Look for cycles to self + for _, e := range g.Edges() { + if e.Source() == e.Target() { + err = multierror.Append(err, fmt.Errorf( + "Self reference: %s", VertexName(e.Source()))) + } + } + + return err +} + +func (g *AcyclicGraph) Cycles() [][]Vertex { + var cycles [][]Vertex + for _, cycle := range StronglyConnected(&g.Graph) { + if len(cycle) > 1 { + cycles = append(cycles, cycle) + } + } + return cycles +} + +// Walk walks the graph, calling your callback as each node is visited. +// This will walk nodes in parallel if it can. Because the walk is done +// in parallel, the error returned will be a multierror. 
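+//
+// Typical use (illustrative; the callback may be invoked concurrently,
+// so it must be safe for parallel calls):
+//
+//	err := g.Walk(func(v Vertex) error {
+//		log.Printf("visiting %s", VertexName(v))
+//		return nil
+//	})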
+func (g *AcyclicGraph) Walk(cb WalkFunc) error {
+	defer g.debug.BeginOperation(typeWalk, "").End("")
+
+	w := &Walker{Callback: cb, Reverse: true}
+	w.Update(g)
+	return w.Wait()
+}
+
+// AsVertexList is a simple convenience helper for converting a dag.Set to
+// a []Vertex.
+func AsVertexList(s *Set) []Vertex {
+	rawList := s.List()
+	vertexList := make([]Vertex, len(rawList))
+	for i, raw := range rawList {
+		vertexList[i] = raw.(Vertex)
+	}
+	return vertexList
+}
+
+type vertexAtDepth struct {
+	Vertex Vertex
+	Depth  int
+}
+
+// DepthFirstWalk does a depth-first walk of the graph starting from
+// the vertices in start.
+func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
+	defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("")
+
+	seen := make(map[Vertex]struct{})
+	frontier := make([]*vertexAtDepth, len(start))
+	for i, v := range start {
+		frontier[i] = &vertexAtDepth{
+			Vertex: v,
+			Depth:  0,
+		}
+	}
+	for len(frontier) > 0 {
+		// Pop the current vertex
+		n := len(frontier)
+		current := frontier[n-1]
+		frontier = frontier[:n-1]
+
+		// Check if we've seen this already and return...
+		if _, ok := seen[current.Vertex]; ok {
+			continue
+		}
+		seen[current.Vertex] = struct{}{}
+
+		// Visit the current node
+		if err := f(current.Vertex, current.Depth); err != nil {
+			return err
+		}
+
+		// Visit targets of this in a consistent order.
+		targets := AsVertexList(g.DownEdges(current.Vertex))
+		sort.Sort(byVertexName(targets))
+		for _, t := range targets {
+			frontier = append(frontier, &vertexAtDepth{
+				Vertex: t,
+				Depth:  current.Depth + 1,
+			})
+		}
+	}
+
+	return nil
+}
+
+// ReverseDepthFirstWalk does a depth-first walk _up_ the graph starting from
+// the vertices in start.
+func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
+	defer g.debug.BeginOperation(typeReverseDepthFirstWalk, "").End("")
+
+	seen := make(map[Vertex]struct{})
+	frontier := make([]*vertexAtDepth, len(start))
+	for i, v := range start {
+		frontier[i] = &vertexAtDepth{
+			Vertex: v,
+			Depth:  0,
+		}
+	}
+	for len(frontier) > 0 {
+		// Pop the current vertex
+		n := len(frontier)
+		current := frontier[n-1]
+		frontier = frontier[:n-1]
+
+		// Check if we've seen this already and return...
+		if _, ok := seen[current.Vertex]; ok {
+			continue
+		}
+		seen[current.Vertex] = struct{}{}
+
+		// Add next set of targets in a consistent order.
+		targets := AsVertexList(g.UpEdges(current.Vertex))
+		sort.Sort(byVertexName(targets))
+		for _, t := range targets {
+			frontier = append(frontier, &vertexAtDepth{
+				Vertex: t,
+				Depth:  current.Depth + 1,
+			})
+		}
+
+		// Visit the current node
+		if err := f(current.Vertex, current.Depth); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// byVertexName implements sort.Interface so a list of Vertices can be sorted
+// consistently by their VertexName
+type byVertexName []Vertex
+
+func (b byVertexName) Len() int      { return len(b) }
+func (b byVertexName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byVertexName) Less(i, j int) bool {
+	return VertexName(b[i]) < VertexName(b[j])
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/edge.go b/vendor/github.com/hashicorp/terraform/dag/edge.go
new file mode 100644
index 00000000..f0d99ee3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/edge.go
@@ -0,0 +1,37 @@
+package dag
+
+import (
+	"fmt"
+)
+
+// Edge represents an edge in the graph, with a source and target vertex.
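+//
+// Edges are typically created with BasicEdge and registered on a graph
+// with Connect (illustrative):
+//
+//	g.Add("a")
+//	g.Add("b")
+//	g.Connect(BasicEdge("a", "b")) // edge from "a" to "b"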
+type Edge interface {
+	Source() Vertex
+	Target() Vertex
+
+	Hashable
+}
+
+// BasicEdge returns an Edge implementation that simply tracks the source
+// and target given as-is.
+func BasicEdge(source, target Vertex) Edge {
+	return &basicEdge{S: source, T: target}
+}
+
+// basicEdge is a basic implementation of Edge that has the source and
+// target vertex.
+type basicEdge struct {
+	S, T Vertex
+}
+
+func (e *basicEdge) Hashcode() interface{} {
+	return fmt.Sprintf("%p-%p", e.S, e.T)
+}
+
+func (e *basicEdge) Source() Vertex {
+	return e.S
+}
+
+func (e *basicEdge) Target() Vertex {
+	return e.T
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/graph.go b/vendor/github.com/hashicorp/terraform/dag/graph.go
new file mode 100644
index 00000000..e7517a20
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/graph.go
@@ -0,0 +1,391 @@
+package dag
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"sort"
+)
+
+// Graph is used to represent a dependency graph.
+type Graph struct {
+	vertices  *Set
+	edges     *Set
+	downEdges map[interface{}]*Set
+	upEdges   map[interface{}]*Set
+
+	// JSON encoder for recording debug information
+	debug *encoder
+}
+
+// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher.
+type Subgrapher interface {
+	Subgraph() Grapher
+}
+
+// A Grapher is any type that returns a Grapher, mainly used to identify
+// dag.Graph and dag.AcyclicGraph. In the case of Graph and AcyclicGraph, they
+// return themselves.
+type Grapher interface {
+	DirectedGraph() Grapher
+}
+
+// Vertex of the graph.
+type Vertex interface{}
+
+// NamedVertex is an optional interface that can be implemented by Vertex
+// to give it a human-friendly name that is used for outputting the graph.
+type NamedVertex interface {
+	Vertex
+	Name() string
+}
+
+func (g *Graph) DirectedGraph() Grapher {
+	return g
+}
+
+// Vertices returns the list of all the vertices in the graph.
+func (g *Graph) Vertices() []Vertex {
+	list := g.vertices.List()
+	result := make([]Vertex, len(list))
+	for i, v := range list {
+		result[i] = v.(Vertex)
+	}
+
+	return result
+}
+
+// Edges returns the list of all the edges in the graph.
+func (g *Graph) Edges() []Edge {
+	list := g.edges.List()
+	result := make([]Edge, len(list))
+	for i, v := range list {
+		result[i] = v.(Edge)
+	}
+
+	return result
+}
+
+// EdgesFrom returns the list of edges from the given source.
+func (g *Graph) EdgesFrom(v Vertex) []Edge {
+	var result []Edge
+	from := hashcode(v)
+	for _, e := range g.Edges() {
+		if hashcode(e.Source()) == from {
+			result = append(result, e)
+		}
+	}
+
+	return result
+}
+
+// EdgesTo returns the list of edges to the given target.
+func (g *Graph) EdgesTo(v Vertex) []Edge {
+	var result []Edge
+	search := hashcode(v)
+	for _, e := range g.Edges() {
+		if hashcode(e.Target()) == search {
+			result = append(result, e)
+		}
+	}
+
+	return result
+}
+
+// HasVertex checks if the given Vertex is present in the graph.
+func (g *Graph) HasVertex(v Vertex) bool {
+	return g.vertices.Include(v)
+}
+
+// HasEdge checks if the given Edge is present in the graph.
+func (g *Graph) HasEdge(e Edge) bool {
+	return g.edges.Include(e)
+}
+
+// Add adds a vertex to the graph. This is safe to call multiple times with
+// the same Vertex.
+func (g *Graph) Add(v Vertex) Vertex {
+	g.init()
+	g.vertices.Add(v)
+	g.debug.Add(v)
+	return v
+}
+
+// Remove removes a vertex from the graph. This will also remove any
+// edges with this vertex as a source or target.
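+//
+// Illustrative:
+//
+//	g.Connect(BasicEdge("a", "b"))
+//	g.Remove("b") // the "a" -> "b" edge is removed as well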
+func (g *Graph) Remove(v Vertex) Vertex { + // Delete the vertex itself + g.vertices.Delete(v) + g.debug.Remove(v) + + // Delete the edges to non-existent things + for _, target := range g.DownEdges(v).List() { + g.RemoveEdge(BasicEdge(v, target)) + } + for _, source := range g.UpEdges(v).List() { + g.RemoveEdge(BasicEdge(source, v)) + } + + return nil +} + +// Replace replaces the original Vertex with replacement. If the original +// does not exist within the graph, then false is returned. Otherwise, true +// is returned. +func (g *Graph) Replace(original, replacement Vertex) bool { + // If we don't have the original, we can't do anything + if !g.vertices.Include(original) { + return false + } + + defer g.debug.BeginOperation("Replace", "").End("") + + // If they're the same, then don't do anything + if original == replacement { + return true + } + + // Add our new vertex, then copy all the edges + g.Add(replacement) + for _, target := range g.DownEdges(original).List() { + g.Connect(BasicEdge(replacement, target)) + } + for _, source := range g.UpEdges(original).List() { + g.Connect(BasicEdge(source, replacement)) + } + + // Remove our old vertex, which will also remove all the edges + g.Remove(original) + + return true +} + +// RemoveEdge removes an edge from the graph. +func (g *Graph) RemoveEdge(edge Edge) { + g.init() + g.debug.RemoveEdge(edge) + + // Delete the edge from the set + g.edges.Delete(edge) + + // Delete the up/down edges + if s, ok := g.downEdges[hashcode(edge.Source())]; ok { + s.Delete(edge.Target()) + } + if s, ok := g.upEdges[hashcode(edge.Target())]; ok { + s.Delete(edge.Source()) + } +} + +// DownEdges returns the outward edges from the source Vertex v. +func (g *Graph) DownEdges(v Vertex) *Set { + g.init() + return g.downEdges[hashcode(v)] +} + +// UpEdges returns the inward edges to the destination Vertex v. +func (g *Graph) UpEdges(v Vertex) *Set { + g.init() + return g.upEdges[hashcode(v)] +} + +// Connect adds an edge with the given source and target. This is safe to +// call multiple times with the same value. Note that the same value is +// verified through pointer equality of the vertices, not through the +// value of the edge itself. +func (g *Graph) Connect(edge Edge) { + g.init() + g.debug.Connect(edge) + + source := edge.Source() + target := edge.Target() + sourceCode := hashcode(source) + targetCode := hashcode(target) + + // Do we have this already? If so, don't add it again. + if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) { + return + } + + // Add the edge to the set + g.edges.Add(edge) + + // Add the down edge + s, ok := g.downEdges[sourceCode] + if !ok { + s = new(Set) + g.downEdges[sourceCode] = s + } + s.Add(target) + + // Add the up edge + s, ok = g.upEdges[targetCode] + if !ok { + s = new(Set) + g.upEdges[targetCode] = s + } + s.Add(source) +} + +// String outputs some human-friendly output for the graph structure. +func (g *Graph) StringWithNodeTypes() string { + var buf bytes.Buffer + + // Build the list of node names and a mapping so that we can more + // easily alphabetize the output to remain deterministic. + vertices := g.Vertices() + names := make([]string, 0, len(vertices)) + mapping := make(map[string]Vertex, len(vertices)) + for _, v := range vertices { + name := VertexName(v) + names = append(names, name) + mapping[name] = v + } + sort.Strings(names) + + // Write each node in order... 
+ for _, name := range names { + v := mapping[name] + targets := g.downEdges[hashcode(v)] + + buf.WriteString(fmt.Sprintf("%s - %T\n", name, v)) + + // Alphabetize dependencies + deps := make([]string, 0, targets.Len()) + targetNodes := make(map[string]Vertex) + for _, target := range targets.List() { + dep := VertexName(target) + deps = append(deps, dep) + targetNodes[dep] = target + } + sort.Strings(deps) + + // Write dependencies + for _, d := range deps { + buf.WriteString(fmt.Sprintf(" %s - %T\n", d, targetNodes[d])) + } + } + + return buf.String() +} + +// String outputs some human-friendly output for the graph structure. +func (g *Graph) String() string { + var buf bytes.Buffer + + // Build the list of node names and a mapping so that we can more + // easily alphabetize the output to remain deterministic. + vertices := g.Vertices() + names := make([]string, 0, len(vertices)) + mapping := make(map[string]Vertex, len(vertices)) + for _, v := range vertices { + name := VertexName(v) + names = append(names, name) + mapping[name] = v + } + sort.Strings(names) + + // Write each node in order... + for _, name := range names { + v := mapping[name] + targets := g.downEdges[hashcode(v)] + + buf.WriteString(fmt.Sprintf("%s\n", name)) + + // Alphabetize dependencies + deps := make([]string, 0, targets.Len()) + for _, target := range targets.List() { + deps = append(deps, VertexName(target)) + } + sort.Strings(deps) + + // Write dependencies + for _, d := range deps { + buf.WriteString(fmt.Sprintf(" %s\n", d)) + } + } + + return buf.String() +} + +func (g *Graph) init() { + if g.vertices == nil { + g.vertices = new(Set) + } + if g.edges == nil { + g.edges = new(Set) + } + if g.downEdges == nil { + g.downEdges = make(map[interface{}]*Set) + } + if g.upEdges == nil { + g.upEdges = make(map[interface{}]*Set) + } +} + +// Dot returns a dot-formatted representation of the Graph. +func (g *Graph) Dot(opts *DotOpts) []byte { + return newMarshalGraph("", g).Dot(opts) +} + +// MarshalJSON returns a JSON representation of the entire Graph. +func (g *Graph) MarshalJSON() ([]byte, error) { + dg := newMarshalGraph("root", g) + return json.MarshalIndent(dg, "", " ") +} + +// SetDebugWriter sets the io.Writer where the Graph will record debug +// information. After this is set, the graph will immediately encode itself to +// the stream, and continue to record all subsequent operations. +func (g *Graph) SetDebugWriter(w io.Writer) { + g.debug = &encoder{w: w} + g.debug.Encode(newMarshalGraph("root", g)) +} + +// DebugVertexInfo encodes arbitrary information about a vertex in the graph +// debug logs. +func (g *Graph) DebugVertexInfo(v Vertex, info string) { + va := newVertexInfo(typeVertexInfo, v, info) + g.debug.Encode(va) +} + +// DebugEdgeInfo encodes arbitrary information about an edge in the graph debug +// logs. +func (g *Graph) DebugEdgeInfo(e Edge, info string) { + ea := newEdgeInfo(typeEdgeInfo, e, info) + g.debug.Encode(ea) +} + +// DebugVisitInfo records a visit to a Vertex during a walk operation. +func (g *Graph) DebugVisitInfo(v Vertex, info string) { + vi := newVertexInfo(typeVisitInfo, v, info) + g.debug.Encode(vi) +} + +// DebugOperation marks the start of a set of graph transformations in +// the debug log, and returns a DebugOperationEnd func, which marks the end of +// the operation in the log. Additional information can be added to the log via +// the info parameter. 
+// +// The returned func's End method allows this method to be called from a single +// defer statement: +// defer g.DebugOperationBegin("OpName", "operating").End("") +// +// The returned function must be called to properly close the logical operation +// in the logs. +func (g *Graph) DebugOperation(operation string, info string) DebugOperationEnd { + return g.debug.BeginOperation(operation, info) +} + +// VertexName returns the name of a vertex. +func VertexName(raw Vertex) string { + switch v := raw.(type) { + case NamedVertex: + return v.Name() + case fmt.Stringer: + return fmt.Sprintf("%s", v) + default: + return fmt.Sprintf("%v", v) + } +} diff --git a/vendor/github.com/hashicorp/terraform/dag/set.go b/vendor/github.com/hashicorp/terraform/dag/set.go new file mode 100644 index 00000000..92b42151 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/dag/set.go @@ -0,0 +1,123 @@ +package dag + +import ( + "sync" +) + +// Set is a set data structure. +type Set struct { + m map[interface{}]interface{} + once sync.Once +} + +// Hashable is the interface used by set to get the hash code of a value. +// If this isn't given, then the value of the item being added to the set +// itself is used as the comparison value. +type Hashable interface { + Hashcode() interface{} +} + +// hashcode returns the hashcode used for set elements. +func hashcode(v interface{}) interface{} { + if h, ok := v.(Hashable); ok { + return h.Hashcode() + } + + return v +} + +// Add adds an item to the set +func (s *Set) Add(v interface{}) { + s.once.Do(s.init) + s.m[hashcode(v)] = v +} + +// Delete removes an item from the set. +func (s *Set) Delete(v interface{}) { + s.once.Do(s.init) + delete(s.m, hashcode(v)) +} + +// Include returns true/false of whether a value is in the set. +func (s *Set) Include(v interface{}) bool { + s.once.Do(s.init) + _, ok := s.m[hashcode(v)] + return ok +} + +// Intersection computes the set intersection with other. +func (s *Set) Intersection(other *Set) *Set { + result := new(Set) + if s == nil { + return result + } + if other != nil { + for _, v := range s.m { + if other.Include(v) { + result.Add(v) + } + } + } + + return result +} + +// Difference returns a set with the elements that s has but +// other doesn't. +func (s *Set) Difference(other *Set) *Set { + result := new(Set) + if s != nil { + for k, v := range s.m { + var ok bool + if other != nil { + _, ok = other.m[k] + } + if !ok { + result.Add(v) + } + } + } + + return result +} + +// Filter returns a set that contains the elements from the receiver +// where the given callback returns true. +func (s *Set) Filter(cb func(interface{}) bool) *Set { + result := new(Set) + + for _, v := range s.m { + if cb(v) { + result.Add(v) + } + } + + return result +} + +// Len is the number of items in the set. +func (s *Set) Len() int { + if s == nil { + return 0 + } + + return len(s.m) +} + +// List returns the list of set elements. +func (s *Set) List() []interface{} { + if s == nil { + return nil + } + + r := make([]interface{}, 0, len(s.m)) + for _, v := range s.m { + r = append(r, v) + } + + return r +} + +func (s *Set) init() { + s.m = make(map[interface{}]interface{}) +} diff --git a/vendor/github.com/hashicorp/terraform/dag/tarjan.go b/vendor/github.com/hashicorp/terraform/dag/tarjan.go new file mode 100644 index 00000000..9d8b25ce --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/dag/tarjan.go @@ -0,0 +1,107 @@ +package dag + +// StronglyConnected returns the list of strongly connected components +// within the Graph g. 
This information is primarily used by this package
+// for cycle detection, but strongly connected components have widespread
+// use.
+func StronglyConnected(g *Graph) [][]Vertex {
+	vs := g.Vertices()
+	acct := sccAcct{
+		NextIndex:   1,
+		VertexIndex: make(map[Vertex]int, len(vs)),
+	}
+	for _, v := range vs {
+		// Recurse on any non-visited nodes
+		if acct.VertexIndex[v] == 0 {
+			stronglyConnected(&acct, g, v)
+		}
+	}
+	return acct.SCC
+}
+
+func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int {
+	// Initial vertex visit
+	index := acct.visit(v)
+	minIdx := index
+
+	for _, raw := range g.DownEdges(v).List() {
+		target := raw.(Vertex)
+		targetIdx := acct.VertexIndex[target]
+
+		// Recurse on successor if not yet visited
+		if targetIdx == 0 {
+			minIdx = min(minIdx, stronglyConnected(acct, g, target))
+		} else if acct.inStack(target) {
+			// Check if the vertex is in the stack
+			minIdx = min(minIdx, targetIdx)
+		}
+	}
+
+	// Pop the strongly connected components off the stack if
+	// this is a root vertex
+	if index == minIdx {
+		var scc []Vertex
+		for {
+			v2 := acct.pop()
+			scc = append(scc, v2)
+			if v2 == v {
+				break
+			}
+		}
+
+		acct.SCC = append(acct.SCC, scc)
+	}
+
+	return minIdx
+}
+
+func min(a, b int) int {
+	if a <= b {
+		return a
+	}
+	return b
+}
+
+// sccAcct is used to pass around accounting information for
+// the StronglyConnectedComponents algorithm
+type sccAcct struct {
+	NextIndex   int
+	VertexIndex map[Vertex]int
+	Stack       []Vertex
+	SCC         [][]Vertex
+}
+
+// visit assigns an index and pushes a vertex onto the stack
+func (s *sccAcct) visit(v Vertex) int {
+	idx := s.NextIndex
+	s.VertexIndex[v] = idx
+	s.NextIndex++
+	s.push(v)
+	return idx
+}
+
+// push adds a vertex to the stack
+func (s *sccAcct) push(n Vertex) {
+	s.Stack = append(s.Stack, n)
+}
+
+// pop removes a vertex from the stack
+func (s *sccAcct) pop() Vertex {
+	n := len(s.Stack)
+	if n == 0 {
+		return nil
+	}
+	vertex := s.Stack[n-1]
+	s.Stack = s.Stack[:n-1]
+	return vertex
+}
+
+// inStack checks if a vertex is in the stack
+func (s *sccAcct) inStack(needle Vertex) bool {
+	for _, n := range s.Stack {
+		if n == needle {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/expand.go b/vendor/github.com/hashicorp/terraform/flatmap/expand.go
new file mode 100644
index 00000000..e0b81b64
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/expand.go
@@ -0,0 +1,147 @@
+package flatmap
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/hil"
+)
+
+// Expand takes a map and a key (prefix) and expands that value into
+// a more complex structure. This is the reverse of the Flatten operation.
+func Expand(m map[string]string, key string) interface{} {
+	// If the key is exactly a key in the map, just return it
+	if v, ok := m[key]; ok {
+		if v == "true" {
+			return true
+		} else if v == "false" {
+			return false
+		}
+
+		return v
+	}
+
+	// Check if the key is an array, and if so, expand the array
+	if v, ok := m[key+".#"]; ok {
+		// If the count of the key is unknown, then just put the unknown
+		// value in the value itself. This will be detected by Terraform
+		// core later.
+		if v == hil.UnknownValue {
+			return v
+		}
+
+		return expandArray(m, key)
+	}
+
+	// Check if this is a prefix in the map
+	prefix := key + "."
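+
+	// For example (illustrative): with m = {"tags.%": "1",
+	// "tags.env": "prod"}, Expand(m, "tags") matches this prefix check
+	// and returns map[string]interface{}{"env": "prod"} via expandMap.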
+	for k := range m {
+		if strings.HasPrefix(k, prefix) {
+			return expandMap(m, prefix)
+		}
+	}
+
+	return nil
+}
+
+func expandArray(m map[string]string, prefix string) []interface{} {
+	num, err := strconv.ParseInt(m[prefix+".#"], 0, 0)
+	if err != nil {
+		panic(err)
+	}
+
+	// If the number of elements in this array is 0, then return an
+	// empty slice as there is nothing to expand. Trying to expand it
+	// anyway could lead to crashes as any child maps, arrays or sets
+	// that no longer exist are still shown as empty with a count of 0.
+	if num == 0 {
+		return []interface{}{}
+	}
+
+	// The Schema "Set" type stores its values in an array format, but
+	// using numeric hash values instead of ordinal keys. Take the set
+	// of keys regardless of value, and expand them in numeric order.
+	// See GH-11042 for more details.
+	keySet := map[int]bool{}
+	computed := map[string]bool{}
+	for k := range m {
+		if !strings.HasPrefix(k, prefix+".") {
+			continue
+		}
+
+		key := k[len(prefix)+1:]
+		idx := strings.Index(key, ".")
+		if idx != -1 {
+			key = key[:idx]
+		}
+
+		// skip the count value
+		if key == "#" {
+			continue
+		}
+
+		// strip the computed flag if there is one
+		if strings.HasPrefix(key, "~") {
+			key = key[1:]
+			computed[key] = true
+		}
+
+		k, err := strconv.Atoi(key)
+		if err != nil {
+			panic(err)
+		}
+		keySet[int(k)] = true
+	}
+
+	keysList := make([]int, 0, num)
+	for key := range keySet {
+		keysList = append(keysList, key)
+	}
+	sort.Ints(keysList)
+
+	result := make([]interface{}, num)
+	for i, key := range keysList {
+		keyString := strconv.Itoa(key)
+		if computed[keyString] {
+			keyString = "~" + keyString
+		}
+		result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString))
+	}
+
+	return result
+}
+
+func expandMap(m map[string]string, prefix string) map[string]interface{} {
+	// Submaps may not have a '%' key, so we can't count on this value being
+	// here. If we don't have a count, just proceed as if we have a map.
+	if count, ok := m[prefix+"%"]; ok && count == "0" {
+		return map[string]interface{}{}
+	}
+
+	result := make(map[string]interface{})
+	for k := range m {
+		if !strings.HasPrefix(k, prefix) {
+			continue
+		}
+
+		key := k[len(prefix):]
+		idx := strings.Index(key, ".")
+		if idx != -1 {
+			key = key[:idx]
+		}
+		if _, ok := result[key]; ok {
+			continue
+		}
+
+		// skip the map count value
+		if key == "%" {
+			continue
+		}
+
+		result[key] = Expand(m, k[:len(prefix)+len(key)])
+	}
+
+	return result
+}
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go
new file mode 100644
index 00000000..9ff6e426
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go
@@ -0,0 +1,71 @@
+package flatmap
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Flatten takes a structure and turns it into a flat map[string]string.
+//
+// Within the "thing" parameter, only primitive values are allowed. Structs are
+// not supported. Therefore, it can only be slices, maps, primitives, and
+// any combination of those together.
+//
+// See the tests for examples of what inputs are turned into.
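+//
+// One illustrative case:
+//
+//	Flatten(map[string]interface{}{
+//		"tags": []interface{}{"a", "b"},
+//	})
+//	// => Map{"tags.#": "2", "tags.0": "a", "tags.1": "b"}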
+func Flatten(thing map[string]interface{}) Map { + result := make(map[string]string) + + for k, raw := range thing { + flatten(result, k, reflect.ValueOf(raw)) + } + + return Map(result) +} + +func flatten(result map[string]string, prefix string, v reflect.Value) { + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Bool: + if v.Bool() { + result[prefix] = "true" + } else { + result[prefix] = "false" + } + case reflect.Int: + result[prefix] = fmt.Sprintf("%d", v.Int()) + case reflect.Map: + flattenMap(result, prefix, v) + case reflect.Slice: + flattenSlice(result, prefix, v) + case reflect.String: + result[prefix] = v.String() + default: + panic(fmt.Sprintf("Unknown: %s", v)) + } +} + +func flattenMap(result map[string]string, prefix string, v reflect.Value) { + for _, k := range v.MapKeys() { + if k.Kind() == reflect.Interface { + k = k.Elem() + } + + if k.Kind() != reflect.String { + panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) + } + + flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) + } +} + +func flattenSlice(result map[string]string, prefix string, v reflect.Value) { + prefix = prefix + "." + + result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) + for i := 0; i < v.Len(); i++ { + flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) + } +} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/map.go b/vendor/github.com/hashicorp/terraform/flatmap/map.go new file mode 100644 index 00000000..46b72c40 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/flatmap/map.go @@ -0,0 +1,82 @@ +package flatmap + +import ( + "strings" +) + +// Map is a wrapper around map[string]string that provides some helpers +// above it that assume the map is in the format that flatmap expects +// (the result of Flatten). +// +// All modifying functions such as Delete are done in-place unless +// otherwise noted. +type Map map[string]string + +// Contains returns true if the map contains the given key. +func (m Map) Contains(key string) bool { + for _, k := range m.Keys() { + if k == key { + return true + } + } + + return false +} + +// Delete deletes a key out of the map with the given prefix. +func (m Map) Delete(prefix string) { + for k, _ := range m { + match := k == prefix + if !match { + if !strings.HasPrefix(k, prefix) { + continue + } + + if k[len(prefix):len(prefix)+1] != "." { + continue + } + } + + delete(m, k) + } +} + +// Keys returns all of the top-level keys in this map +func (m Map) Keys() []string { + ks := make(map[string]struct{}) + for k, _ := range m { + idx := strings.Index(k, ".") + if idx == -1 { + idx = len(k) + } + + ks[k[:idx]] = struct{}{} + } + + result := make([]string, 0, len(ks)) + for k, _ := range ks { + result = append(result, k) + } + + return result +} + +// Merge merges the contents of the other Map into this one. +// +// This merge is smarter than a simple map iteration because it +// will fully replace arrays and other complex structures that +// are present in this map with the other map's. For example, if +// this map has a 3 element "foo" list, and m2 has a 2 element "foo" +// list, then the result will be that m has a 2 element "foo" +// list. 
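+//
+// Concretely (illustrative):
+//
+//	m := Map{"foo.#": "3", "foo.0": "a", "foo.1": "b", "foo.2": "c"}
+//	m.Merge(Map{"foo.#": "2", "foo.0": "x", "foo.1": "y"})
+//	// m is now {"foo.#": "2", "foo.0": "x", "foo.1": "y"}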
+func (m Map) Merge(m2 Map) { + for _, prefix := range m2.Keys() { + m.Delete(prefix) + + for k, v := range m2 { + if strings.HasPrefix(k, prefix) { + m[k] = v + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go new file mode 100644 index 00000000..67be1df1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go @@ -0,0 +1,41 @@ +package hilmapstructure + +import ( + "fmt" + "reflect" + + "github.com/mitchellh/mapstructure" +) + +var hilMapstructureDecodeHookEmptySlice []interface{} +var hilMapstructureDecodeHookStringSlice []string +var hilMapstructureDecodeHookEmptyMap map[string]interface{} + +// WeakDecode behaves in the same way as mapstructure.WeakDecode but has a +// DecodeHook which defeats the backward compatibility mode of mapstructure +// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This +// allows us to use WeakDecode (desirable), but not fail on empty lists. +func WeakDecode(m interface{}, rawVal interface{}) error { + config := &mapstructure.DecoderConfig{ + DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) { + sliceType := reflect.TypeOf(hilMapstructureDecodeHookEmptySlice) + stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice) + mapType := reflect.TypeOf(hilMapstructureDecodeHookEmptyMap) + + if (source == sliceType || source == stringSliceType) && target == mapType { + return nil, fmt.Errorf("Cannot convert a []interface{} into a map[string]interface{}") + } + + return val, nil + }, + WeaklyTypedInput: true, + Result: rawVal, + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(m) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go new file mode 100644 index 00000000..306128ed --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/context.go @@ -0,0 +1,1022 @@ +package terraform + +import ( + "context" + "fmt" + "log" + "sort" + "strings" + "sync" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/helper/experiment" +) + +// InputMode defines what sort of input will be asked for when Input +// is called on Context. +type InputMode byte + +const ( + // InputModeVar asks for all variables + InputModeVar InputMode = 1 << iota + + // InputModeVarUnset asks for variables which are not set yet. + // InputModeVar must be set for this to have an effect. + InputModeVarUnset + + // InputModeProvider asks for provider variables + InputModeProvider + + // InputModeStd is the standard operating mode and asks for both variables + // and providers. + InputModeStd = InputModeVar | InputModeProvider +) + +var ( + // contextFailOnShadowError will cause Context operations to return + // errors when shadow operations fail. This is only used for testing. + contextFailOnShadowError = false + + // contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every + // Plan operation, effectively testing the Diff DeepCopy whenever + // a Plan occurs. This is enabled for tests. + contextTestDeepCopyOnPlan = false +) + +// ContextOpts are the user-configurable options to create a context with +// NewContext. 
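+//
+// A minimal construction sketch (illustrative; tree, state, and
+// providerFactories are placeholders the caller supplies):
+//
+//	ctx, err := NewContext(&ContextOpts{
+//		Module:    tree,
+//		State:     state,
+//		Providers: providerFactories,
+//	})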
+type ContextOpts struct {
+	Meta               *ContextMeta
+	Destroy            bool
+	Diff               *Diff
+	Hooks              []Hook
+	Module             *module.Tree
+	Parallelism        int
+	State              *State
+	StateFutureAllowed bool
+	Providers          map[string]ResourceProviderFactory
+	Provisioners       map[string]ResourceProvisionerFactory
+	Shadow             bool
+	Targets            []string
+	Variables          map[string]interface{}
+
+	UIInput UIInput
+}
+
+// ContextMeta is metadata about the running context. This is information
+// that this package or structure cannot determine on its own but exposes
+// into Terraform in various ways. This must be provided by the Context
+// initializer.
+type ContextMeta struct {
+	Env string // Env is the state environment
+}
+
+// Context represents all the context that Terraform needs in order to
+// perform operations on infrastructure. This structure is built using
+// NewContext. See the documentation for that.
+//
+// Extra functions on Context can be found in context_*.go files.
+type Context struct {
+	// Maintainer note: Anytime this struct is changed, please verify
+	// that newShadowContext still does the right thing. Tests should
+	// fail regardless but putting this note here as well.
+
+	components contextComponentFactory
+	destroy    bool
+	diff       *Diff
+	diffLock   sync.RWMutex
+	hooks      []Hook
+	meta       *ContextMeta
+	module     *module.Tree
+	sh         *stopHook
+	shadow     bool
+	state      *State
+	stateLock  sync.RWMutex
+	targets    []string
+	uiInput    UIInput
+	variables  map[string]interface{}
+
+	l                   sync.Mutex // Lock acquired during any task
+	parallelSem         Semaphore
+	providerInputConfig map[string]map[string]interface{}
+	runLock             sync.Mutex
+	runCond             *sync.Cond
+	runContext          context.Context
+	runContextCancel    context.CancelFunc
+	shadowErr           error
+}
+
+// NewContext creates a new Context structure.
+//
+// Once a Context is created, the pointer values within ContextOpts
+// should not be mutated in any way, since the pointers are copied, not
+// the values themselves.
+func NewContext(opts *ContextOpts) (*Context, error) {
+	// Validate the version requirement if it is given
+	if opts.Module != nil {
+		if err := checkRequiredVersion(opts.Module); err != nil {
+			return nil, err
+		}
+	}
+
+	// Copy all the hooks and add our stop hook. We don't append directly
+	// to the Config so that we're not modifying that in-place.
+	sh := new(stopHook)
+	hooks := make([]Hook, len(opts.Hooks)+1)
+	copy(hooks, opts.Hooks)
+	hooks[len(opts.Hooks)] = sh
+
+	state := opts.State
+	if state == nil {
+		state = new(State)
+		state.init()
+	}
+
+	// If our state is from the future, then error. Callers can avoid
+	// this error by explicitly setting `StateFutureAllowed`.
+	if !opts.StateFutureAllowed && state.FromFutureTerraform() {
+		return nil, fmt.Errorf(
+			"Terraform doesn't allow running any operations against a state\n"+
+				"that was written by a future Terraform version. The state is\n"+
+				"reporting it is written by Terraform '%s'.\n\n"+
+				"Please run at least that version of Terraform to continue.",
+			state.TFVersion)
+	}
+
+	// Explicitly reset our state version to our current version so that
+	// any operations we do will write out that our latest version
+	// has run.
+	state.TFVersion = Version
+
+	// Determine parallelism, default to 10. We do this both to limit
+	// CPU pressure but also to have an extra guard against rate throttling
+	// from providers.
+ par := opts.Parallelism + if par == 0 { + par = 10 + } + + // Set up the variables in the following sequence: + // 0 - Take default values from the configuration + // 1 - Take values from TF_VAR_x environment variables + // 2 - Take values specified in -var flags, overriding values + // set by environment variables if necessary. This includes + // values taken from -var-file in addition. + variables := make(map[string]interface{}) + + if opts.Module != nil { + var err error + variables, err = Variables(opts.Module, opts.Variables) + if err != nil { + return nil, err + } + } + + diff := opts.Diff + if diff == nil { + diff = &Diff{} + } + + return &Context{ + components: &basicComponentFactory{ + providers: opts.Providers, + provisioners: opts.Provisioners, + }, + destroy: opts.Destroy, + diff: diff, + hooks: hooks, + meta: opts.Meta, + module: opts.Module, + shadow: opts.Shadow, + state: state, + targets: opts.Targets, + uiInput: opts.UIInput, + variables: variables, + + parallelSem: NewSemaphore(par), + providerInputConfig: make(map[string]map[string]interface{}), + sh: sh, + }, nil +} + +type ContextGraphOpts struct { + // If true, validates the graph structure (checks for cycles). + Validate bool + + // Legacy graphs only: won't prune the graph + Verbose bool +} + +// Graph returns the graph used for the given operation type. +// +// The most extensive or complex graph type is GraphTypePlan. +func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) { + if opts == nil { + opts = &ContextGraphOpts{Validate: true} + } + + log.Printf("[INFO] terraform: building graph: %s", typ) + switch typ { + case GraphTypeApply: + return (&ApplyGraphBuilder{ + Module: c.module, + Diff: c.diff, + State: c.state, + Providers: c.components.ResourceProviders(), + Provisioners: c.components.ResourceProvisioners(), + Targets: c.targets, + Destroy: c.destroy, + Validate: opts.Validate, + }).Build(RootModulePath) + + case GraphTypeInput: + // The input graph is just a slightly modified plan graph + fallthrough + case GraphTypeValidate: + // The validate graph is just a slightly modified plan graph + fallthrough + case GraphTypePlan: + // Create the plan graph builder + p := &PlanGraphBuilder{ + Module: c.module, + State: c.state, + Providers: c.components.ResourceProviders(), + Targets: c.targets, + Validate: opts.Validate, + } + + // Some special cases for other graph types shared with plan currently + var b GraphBuilder = p + switch typ { + case GraphTypeInput: + b = InputGraphBuilder(p) + case GraphTypeValidate: + // We need to set the provisioners so those can be validated + p.Provisioners = c.components.ResourceProvisioners() + + b = ValidateGraphBuilder(p) + } + + return b.Build(RootModulePath) + + case GraphTypePlanDestroy: + return (&DestroyPlanGraphBuilder{ + Module: c.module, + State: c.state, + Targets: c.targets, + Validate: opts.Validate, + }).Build(RootModulePath) + + case GraphTypeRefresh: + return (&RefreshGraphBuilder{ + Module: c.module, + State: c.state, + Providers: c.components.ResourceProviders(), + Targets: c.targets, + Validate: opts.Validate, + }).Build(RootModulePath) + } + + return nil, fmt.Errorf("unknown graph type: %s", typ) +} + +// ShadowError returns any errors caught during a shadow operation. +// +// A shadow operation is an operation run in parallel to a real operation +// that performs the same tasks using new logic on copied state. The results +// are compared to ensure that the new logic works the same as the old logic. 
+// The shadow never affects the real operation or return values.
+//
+// The results of the shadow operation are only available through this
+// function call after a real operation is complete.
+//
+// For API consumers of Context, you can safely ignore this function
+// completely if you have no interest in helping report experimental feature
+// errors to Terraform maintainers. Otherwise, please call this function
+// after every operation and report this to the user.
+//
+// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
+// the real state or result of a real operation. They are purely informational
+// to assist in future Terraform versions being more stable. Please message
+// this effectively to the end user.
+//
+// This must be called only when no other operation is running (refresh,
+// plan, etc.). The result can be used in parallel to any other operation
+// running.
+func (c *Context) ShadowError() error {
+	return c.shadowErr
+}
+
+// State returns a copy of the current state associated with this context.
+//
+// This cannot safely be called in parallel with any other Context function.
+func (c *Context) State() *State {
+	return c.state.DeepCopy()
+}
+
+// Interpolater returns an Interpolater built on a copy of the state
+// that can be used to test interpolation values.
+func (c *Context) Interpolater() *Interpolater {
+	var varLock sync.Mutex
+	var stateLock sync.RWMutex
+	return &Interpolater{
+		Operation:          walkApply,
+		Meta:               c.meta,
+		Module:             c.module,
+		State:              c.state.DeepCopy(),
+		StateLock:          &stateLock,
+		VariableValues:     c.variables,
+		VariableValuesLock: &varLock,
+	}
+}
+
+// Input asks for input to fill variables and provider configurations.
+// This modifies the configuration in-place, so asking for Input twice
+// may result in different UI output showing different current values.
+func (c *Context) Input(mode InputMode) error {
+	defer c.acquireRun("input")()
+
+	if mode&InputModeVar != 0 {
+		// Walk the variables first for the root module. We walk them in
+		// alphabetical order for UX reasons.
+		rootConf := c.module.Config()
+		names := make([]string, len(rootConf.Variables))
+		m := make(map[string]*config.Variable)
+		for i, v := range rootConf.Variables {
+			names[i] = v.Name
+			m[v.Name] = v
+		}
+		sort.Strings(names)
+		for _, n := range names {
+			// If we only care about unset variables, then if the variable
+			// is set, continue on.
+			if mode&InputModeVarUnset != 0 {
+				if _, ok := c.variables[n]; ok {
+					continue
+				}
+			}
+
+			var valueType config.VariableType
+
+			v := m[n]
+			switch valueType = v.Type(); valueType {
+			case config.VariableTypeUnknown:
+				continue
+			case config.VariableTypeMap:
+				// OK
+			case config.VariableTypeList:
+				// OK
+			case config.VariableTypeString:
+				// OK
+			default:
+				panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
+			}
+
+			// If the variable is not already set, and the variable defines a
+			// default, use that for the value.
+ if _, ok := c.variables[n]; !ok { + if v.Default != nil { + c.variables[n] = v.Default.(string) + continue + } + } + + // this should only happen during tests + if c.uiInput == nil { + log.Println("[WARN] Content.uiInput is nil") + continue + } + + // Ask the user for a value for this variable + var value string + retry := 0 + for { + var err error + value, err = c.uiInput.Input(&InputOpts{ + Id: fmt.Sprintf("var.%s", n), + Query: fmt.Sprintf("var.%s", n), + Description: v.Description, + }) + if err != nil { + return fmt.Errorf( + "Error asking for %s: %s", n, err) + } + + if value == "" && v.Required() { + // Redo if it is required, but abort if we keep getting + // blank entries + if retry > 2 { + return fmt.Errorf("missing required value for %q", n) + } + retry++ + continue + } + + break + } + + // no value provided, so don't set the variable at all + if value == "" { + continue + } + + decoded, err := parseVariableAsHCL(n, value, valueType) + if err != nil { + return err + } + + if decoded != nil { + c.variables[n] = decoded + } + } + } + + if mode&InputModeProvider != 0 { + // Build the graph + graph, err := c.Graph(GraphTypeInput, nil) + if err != nil { + return err + } + + // Do the walk + if _, err := c.walk(graph, nil, walkInput); err != nil { + return err + } + } + + return nil +} + +// Apply applies the changes represented by this context and returns +// the resulting state. +// +// Even in the case an error is returned, the state may be returned and will +// potentially be partially updated. In addition to returning the resulting +// state, this context is updated with the latest state. +// +// If the state is required after an error, the caller should call +// Context.State, rather than rely on the return value. +// +// TODO: Apply and Refresh should either always return a state, or rely on the +// State() method. Currently the helper/resource testing framework relies +// on the absence of a returned state to determine if Destroy can be +// called, so that will need to be refactored before this can be changed. +func (c *Context) Apply() (*State, error) { + defer c.acquireRun("apply")() + + // Copy our own state + c.state = c.state.DeepCopy() + + // Build the graph. + graph, err := c.Graph(GraphTypeApply, nil) + if err != nil { + return nil, err + } + + // Determine the operation + operation := walkApply + if c.destroy { + operation = walkDestroy + } + + // Walk the graph + walker, err := c.walk(graph, graph, operation) + if len(walker.ValidationErrors) > 0 { + err = multierror.Append(err, walker.ValidationErrors...) + } + + // Clean out any unused things + c.state.prune() + + return c.state, err +} + +// Plan generates an execution plan for the given context. +// +// The execution plan encapsulates the context and can be stored +// in order to reinstantiate a context later for Apply. +// +// Plan also updates the diff of this context to be the diff generated +// by the plan, so Apply can be called after. +func (c *Context) Plan() (*Plan, error) { + defer c.acquireRun("plan")() + + p := &Plan{ + Module: c.module, + Vars: c.variables, + State: c.state, + Targets: c.targets, + } + + var operation walkOperation + if c.destroy { + operation = walkPlanDestroy + } else { + // Set our state to be something temporary. We do this so that + // the plan can update a fake state so that variables work, then + // we replace it back with our old state. 
+ old := c.state + if old == nil { + c.state = &State{} + c.state.init() + } else { + c.state = old.DeepCopy() + } + defer func() { + c.state = old + }() + + operation = walkPlan + } + + // Setup our diff + c.diffLock.Lock() + c.diff = new(Diff) + c.diff.init() + c.diffLock.Unlock() + + // Build the graph. + graphType := GraphTypePlan + if c.destroy { + graphType = GraphTypePlanDestroy + } + graph, err := c.Graph(graphType, nil) + if err != nil { + return nil, err + } + + // Do the walk + walker, err := c.walk(graph, graph, operation) + if err != nil { + return nil, err + } + p.Diff = c.diff + + // If this is true, it means we're running unit tests. In this case, + // we perform a deep copy just to ensure that all context tests also + // test that a diff is copy-able. This will panic if it fails. This + // is enabled during unit tests. + // + // This should never be true during production usage, but even if it is, + // it can't do any real harm. + if contextTestDeepCopyOnPlan { + p.Diff.DeepCopy() + } + + /* + // We don't do the reverification during the new destroy plan because + // it will use a different apply process. + if X_legacyGraph { + // Now that we have a diff, we can build the exact graph that Apply will use + // and catch any possible cycles during the Plan phase. + if _, err := c.Graph(GraphTypeLegacy, nil); err != nil { + return nil, err + } + } + */ + + var errs error + if len(walker.ValidationErrors) > 0 { + errs = multierror.Append(errs, walker.ValidationErrors...) + } + return p, errs +} + +// Refresh goes through all the resources in the state and refreshes them +// to their latest state. This will update the state that this context +// works with, along with returning it. +// +// Even in the case an error is returned, the state may be returned and +// will potentially be partially updated. +func (c *Context) Refresh() (*State, error) { + defer c.acquireRun("refresh")() + + // Copy our own state + c.state = c.state.DeepCopy() + + // Build the graph. + graph, err := c.Graph(GraphTypeRefresh, nil) + if err != nil { + return nil, err + } + + // Do the walk + if _, err := c.walk(graph, graph, walkRefresh); err != nil { + return nil, err + } + + // Clean out any unused things + c.state.prune() + + return c.state, nil +} + +// Stop stops the running task. +// +// Stop will block until the task completes. +func (c *Context) Stop() { + log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence") + + c.l.Lock() + defer c.l.Unlock() + + // If we're running, then stop + if c.runContextCancel != nil { + log.Printf("[WARN] terraform: run context exists, stopping") + + // Tell the hook we want to stop + c.sh.Stop() + + // Stop the context + c.runContextCancel() + c.runContextCancel = nil + } + + // Grab the condition var before we exit + if cond := c.runCond; cond != nil { + cond.Wait() + } + + log.Printf("[WARN] terraform: stop complete") +} + +// Validate validates the configuration and returns any warnings or errors. +func (c *Context) Validate() ([]string, []error) { + defer c.acquireRun("validate")() + + var errs error + + // Validate the configuration itself + if err := c.module.Validate(); err != nil { + errs = multierror.Append(errs, err) + } + + // This only needs to be done for the root module, since inter-module + // variables are validated in the module tree. + if config := c.module.Config(); config != nil { + // Validate the user variables + if err := smcUserVariables(config, c.variables); len(err) > 0 { + errs = multierror.Append(errs, err...) 
+ } + } + + // If we have errors at this point, the graphing has no chance, + // so just bail early. + if errs != nil { + return nil, []error{errs} + } + + // Build the graph so we can walk it and run Validate on nodes. + // We also validate the graph generated here, but this graph doesn't + // necessarily match the graph that Plan will generate, so we'll validate the + // graph again later after Planning. + graph, err := c.Graph(GraphTypeValidate, nil) + if err != nil { + return nil, []error{err} + } + + // Walk + walker, err := c.walk(graph, graph, walkValidate) + if err != nil { + return nil, multierror.Append(errs, err).Errors + } + + // Return the result + rerrs := multierror.Append(errs, walker.ValidationErrors...) + + sort.Strings(walker.ValidationWarnings) + sort.Slice(rerrs.Errors, func(i, j int) bool { + return rerrs.Errors[i].Error() < rerrs.Errors[j].Error() + }) + + return walker.ValidationWarnings, rerrs.Errors +} + +// Module returns the module tree associated with this context. +func (c *Context) Module() *module.Tree { + return c.module +} + +// Variables will return the mapping of variables that were defined +// for this Context. If Input was called, this mapping may be different +// than what was given. +func (c *Context) Variables() map[string]interface{} { + return c.variables +} + +// SetVariable sets a variable after a context has already been built. +func (c *Context) SetVariable(k string, v interface{}) { + c.variables[k] = v +} + +func (c *Context) acquireRun(phase string) func() { + // With the run lock held, grab the context lock to make changes + // to the run context. + c.l.Lock() + defer c.l.Unlock() + + // Wait until we're no longer running + for c.runCond != nil { + c.runCond.Wait() + } + + // Build our lock + c.runCond = sync.NewCond(&c.l) + + // Setup debugging + dbug.SetPhase(phase) + + // Create a new run context + c.runContext, c.runContextCancel = context.WithCancel(context.Background()) + + // Reset the stop hook so we're not stopped + c.sh.Reset() + + // Reset the shadow errors + c.shadowErr = nil + + return c.releaseRun +} + +func (c *Context) releaseRun() { + // Grab the context lock so that we can make modifications to fields + c.l.Lock() + defer c.l.Unlock() + + // setting the phase to "INVALID" lets us easily detect if we have + // operations happening outside of a run, or we missed setting the proper + // phase + dbug.SetPhase("INVALID") + + // End our run. We check if runContext is non-nil because it can be + // set to nil if it was cancelled via Stop() + if c.runContextCancel != nil { + c.runContextCancel() + } + + // Unlock all waiting our condition + cond := c.runCond + c.runCond = nil + cond.Broadcast() + + // Unset the context + c.runContext = nil +} + +func (c *Context) walk( + graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) { + // Keep track of the "real" context which is the context that does + // the real work: talking to real providers, modifying real state, etc. + realCtx := c + + // If we don't want shadowing, remove it + if !experiment.Enabled(experiment.X_shadow) { + shadow = nil + } + + // Just log this so we can see it in a debug log + if !c.shadow { + log.Printf("[WARN] terraform: shadow graph disabled") + shadow = nil + } + + // If we have a shadow graph, walk that as well + var shadowCtx *Context + var shadowCloser Shadow + if shadow != nil { + // Build the shadow context. 
+ // In the process, override the real context
+ // with the one that is wrapped so that the shadow context can verify
+ // the results of the real.
+ realCtx, shadowCtx, shadowCloser = newShadowContext(c)
+ }
+
+ log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
+
+ walker := &ContextGraphWalker{
+ Context: realCtx,
+ Operation: operation,
+ StopContext: c.runContext,
+ }
+
+ // Watch for a stop so we can call the provider Stop() API.
+ watchStop, watchWait := c.watchStop(walker)
+
+ // Walk the real graph; this will block until it completes.
+ realErr := graph.Walk(walker)
+
+ // Close the channel so the watcher stops, and wait for it to return.
+ close(watchStop)
+ <-watchWait
+
+ // If we have a shadow graph and we interrupted the real graph, then
+ // we just close the shadow and never verify it. It is non-trivial to
+ // recreate the exact execution state up until an interruption so this
+ // isn't supported with shadows at the moment.
+ if shadowCloser != nil && c.sh.Stopped() {
+ // Ignore the error result; there is nothing we can do about it.
+ shadowCloser.CloseShadow()
+
+ // Set it to nil so we don't do anything
+ shadowCloser = nil
+ }
+
+ // If we have a shadow graph, wait for that to complete.
+ if shadowCloser != nil {
+ // Build the graph walker for the shadow. We also wrap this in
+ // a panicwrap so that panics are captured. For the shadow graph,
+ // we just want panics to be normal errors rather than to crash
+ // Terraform.
+ shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{
+ Context: shadowCtx,
+ Operation: operation,
+ })
+
+ // Kick off the shadow walk. This will block on any operations
+ // on the real walk so it is fine to start first.
+ log.Printf("[INFO] Starting shadow graph walk: %s", operation.String())
+ shadowCh := make(chan error)
+ go func() {
+ shadowCh <- shadow.Walk(shadowWalker)
+ }()
+
+ // Notify the shadow that we're done
+ if err := shadowCloser.CloseShadow(); err != nil {
+ c.shadowErr = multierror.Append(c.shadowErr, err)
+ }
+
+ // Wait for the walk to end
+ log.Printf("[DEBUG] Waiting for shadow graph to complete...")
+ shadowWalkErr := <-shadowCh
+
+ // Get any shadow errors
+ if err := shadowCloser.ShadowError(); err != nil {
+ c.shadowErr = multierror.Append(c.shadowErr, err)
+ }
+
+ // Verify the contexts (compare)
+ if err := shadowContextVerify(realCtx, shadowCtx); err != nil {
+ c.shadowErr = multierror.Append(c.shadowErr, err)
+ }
+
+ // At this point, if we're supposed to fail on error, then
+ // we PANIC. Some tests just verify that there is an error,
+ // so simply appending it to realErr and returning could hide
+ // shadow problems.
+ //
+ // This must be done BEFORE appending shadowWalkErr since the
+ // shadowWalkErr may include expected errors.
+ //
+ // We only do this if we don't have a real error. In the case of
+ // a real error, we can't guarantee what nodes were and weren't
+ // traversed in parallel scenarios so we can't guarantee no
+ // shadow errors.
+ if c.shadowErr != nil && contextFailOnShadowError && realErr == nil { + panic(multierror.Prefix(c.shadowErr, "shadow graph:")) + } + + // Now, if we have a walk error, we append that through + if shadowWalkErr != nil { + c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr) + } + + if c.shadowErr == nil { + log.Printf("[INFO] Shadow graph success!") + } else { + log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr) + + // If we're supposed to fail on shadow errors, then report it + if contextFailOnShadowError { + realErr = multierror.Append(realErr, multierror.Prefix( + c.shadowErr, "shadow graph:")) + } + } + } + + return walker, realErr +} + +// watchStop immediately returns a `stop` and a `wait` chan after dispatching +// the watchStop goroutine. This will watch the runContext for cancellation and +// stop the providers accordingly. When the watch is no longer needed, the +// `stop` chan should be closed before waiting on the `wait` chan. +// The `wait` chan is important, because without synchronizing with the end of +// the watchStop goroutine, the runContext may also be closed during the select +// incorrectly causing providers to be stopped. Even if the graph walk is done +// at that point, stopping a provider permanently cancels its StopContext which +// can cause later actions to fail. +func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) { + stop := make(chan struct{}) + wait := make(chan struct{}) + + // get the runContext cancellation channel now, because releaseRun will + // write to the runContext field. + done := c.runContext.Done() + + go func() { + defer close(wait) + // Wait for a stop or completion + select { + case <-done: + // done means the context was canceled, so we need to try and stop + // providers. + case <-stop: + // our own stop channel was closed. + return + } + + // If we're here, we're stopped, trigger the call. + + { + // Copy the providers so that a misbehaved blocking Stop doesn't + // completely hang Terraform. + walker.providerLock.Lock() + ps := make([]ResourceProvider, 0, len(walker.providerCache)) + for _, p := range walker.providerCache { + ps = append(ps, p) + } + defer walker.providerLock.Unlock() + + for _, p := range ps { + // We ignore the error for now since there isn't any reasonable + // action to take if there is an error here, since the stop is still + // advisory: Terraform will exit once the graph node completes. + p.Stop() + } + } + + { + // Call stop on all the provisioners + walker.provisionerLock.Lock() + ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache)) + for _, p := range walker.provisionerCache { + ps = append(ps, p) + } + defer walker.provisionerLock.Unlock() + + for _, p := range ps { + // We ignore the error for now since there isn't any reasonable + // action to take if there is an error here, since the stop is still + // advisory: Terraform will exit once the graph node completes. + p.Stop() + } + } + }() + + return stop, wait +} + +// parseVariableAsHCL parses the value of a single variable as would have been specified +// on the command line via -var or in an environment variable named TF_VAR_x, where x is +// the name of the variable. In order to get around the restriction of HCL requiring a +// top level object, we prepend a sentinel key, decode the user-specified value as its +// value and pull the value back out of the resulting map. 
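The sentinel-key trick described in the comment above is easy to exercise on its own. A minimal standalone sketch, assuming only the vendored github.com/hashicorp/hcl package; the variable value is made up:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl"
)

func main() {
    // A raw value as it might arrive via -var or TF_VAR_subnets.
    input := `["10.0.1.0/24", "10.0.2.0/24"]`

    // HCL only decodes a top-level object, so wrap the value under a
    // sentinel key, decode, then pull the value back out of the map.
    const sentinel = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
    var decoded map[string]interface{}
    if err := hcl.Decode(&decoded, fmt.Sprintf("%s = %s", sentinel, input)); err != nil {
        panic(err)
    }

    fmt.Printf("%#v\n", decoded[sentinel]) // e.g. []interface {}{"10.0.1.0/24", "10.0.2.0/24"}
}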
+func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) { + // expecting a string so don't decode anything, just strip quotes + if targetType == config.VariableTypeString { + return strings.Trim(input, `"`), nil + } + + // return empty types + if strings.TrimSpace(input) == "" { + switch targetType { + case config.VariableTypeList: + return []interface{}{}, nil + case config.VariableTypeMap: + return make(map[string]interface{}), nil + } + } + + const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY" + inputWithSentinal := fmt.Sprintf("%s = %s", sentinelValue, input) + + var decoded map[string]interface{} + err := hcl.Decode(&decoded, inputWithSentinal) + if err != nil { + return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err) + } + + if len(decoded) != 1 { + return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input) + } + + parsedValue, ok := decoded[sentinelValue] + if !ok { + return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input) + } + + switch targetType { + case config.VariableTypeList: + return parsedValue, nil + case config.VariableTypeMap: + if list, ok := parsedValue.([]map[string]interface{}); ok { + return list[0], nil + } + + return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input) + default: + panic(fmt.Errorf("unknown type %s", targetType.Printable())) + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go new file mode 100644 index 00000000..f1d57760 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go @@ -0,0 +1,77 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/module" +) + +// ImportOpts are used as the configuration for Import. +type ImportOpts struct { + // Targets are the targets to import + Targets []*ImportTarget + + // Module is optional, and specifies a config module that is loaded + // into the graph and evaluated. The use case for this is to provide + // provider configuration. + Module *module.Tree +} + +// ImportTarget is a single resource to import. +type ImportTarget struct { + // Addr is the full resource address of the resource to import. + // Example: "module.foo.aws_instance.bar" + Addr string + + // ID is the ID of the resource to import. This is resource-specific. + ID string + + // Provider string + Provider string +} + +// Import takes already-created external resources and brings them +// under Terraform management. Import requires the exact type, name, and ID +// of the resources to import. +// +// This operation is idempotent. If the requested resource is already +// imported, no changes are made to the state. +// +// Further, this operation also gracefully handles partial state. If during +// an import there is a failure, all previously imported resources remain +// imported. +func (c *Context) Import(opts *ImportOpts) (*State, error) { + // Hold a lock since we can modify our own state here + defer c.acquireRun("import")() + + // Copy our own state + c.state = c.state.DeepCopy() + + // If no module is given, default to the module configured with + // the Context. 
+ module := opts.Module + if module == nil { + module = c.module + } + + // Initialize our graph builder + builder := &ImportGraphBuilder{ + ImportTargets: opts.Targets, + Module: module, + Providers: c.components.ResourceProviders(), + } + + // Build the graph! + graph, err := builder.Build(RootModulePath) + if err != nil { + return c.state, err + } + + // Walk it + if _, err := c.walk(graph, nil, walkImport); err != nil { + return c.state, err + } + + // Clean the state + c.state.prune() + + return c.state, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go new file mode 100644 index 00000000..a9fae6c2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go @@ -0,0 +1,866 @@ +package terraform + +import ( + "bufio" + "bytes" + "fmt" + "reflect" + "regexp" + "sort" + "strings" + "sync" + + "github.com/mitchellh/copystructure" +) + +// DiffChangeType is an enum with the kind of changes a diff has planned. +type DiffChangeType byte + +const ( + DiffInvalid DiffChangeType = iota + DiffNone + DiffCreate + DiffUpdate + DiffDestroy + DiffDestroyCreate +) + +// multiVal matches the index key to a flatmapped set, list or map +var multiVal = regexp.MustCompile(`\.(#|%)$`) + +// Diff trackes the changes that are necessary to apply a configuration +// to an existing infrastructure. +type Diff struct { + // Modules contains all the modules that have a diff + Modules []*ModuleDiff +} + +// Prune cleans out unused structures in the diff without affecting +// the behavior of the diff at all. +// +// This is not safe to call concurrently. This is safe to call on a +// nil Diff. +func (d *Diff) Prune() { + if d == nil { + return + } + + // Prune all empty modules + newModules := make([]*ModuleDiff, 0, len(d.Modules)) + for _, m := range d.Modules { + // If the module isn't empty, we keep it + if !m.Empty() { + newModules = append(newModules, m) + } + } + if len(newModules) == 0 { + newModules = nil + } + d.Modules = newModules +} + +// AddModule adds the module with the given path to the diff. +// +// This should be the preferred method to add module diffs since it +// allows us to optimize lookups later as well as control sorting. +func (d *Diff) AddModule(path []string) *ModuleDiff { + m := &ModuleDiff{Path: path} + m.init() + d.Modules = append(d.Modules, m) + return m +} + +// ModuleByPath is used to lookup the module diff for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. +func (d *Diff) ModuleByPath(path []string) *ModuleDiff { + if d == nil { + return nil + } + for _, mod := range d.Modules { + if mod.Path == nil { + panic("missing module path") + } + if reflect.DeepEqual(mod.Path, path) { + return mod + } + } + return nil +} + +// RootModule returns the ModuleState for the root module +func (d *Diff) RootModule() *ModuleDiff { + root := d.ModuleByPath(rootModulePath) + if root == nil { + panic("missing root module") + } + return root +} + +// Empty returns true if the diff has no changes. +func (d *Diff) Empty() bool { + if d == nil { + return true + } + + for _, m := range d.Modules { + if !m.Empty() { + return false + } + } + + return true +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. 
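Before the exact-equality check below, a quick sketch of how a Diff is assembled and pruned; the module path and attribute values are illustrative only:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/terraform"
)

func main() {
    d := new(terraform.Diff)

    // A freshly added module holds no resource diffs, so the whole
    // diff still reports empty.
    mod := d.AddModule([]string{"root", "network"})
    fmt.Println(d.Empty()) // true

    // Recording an instance diff makes it non-empty.
    mod.Resources["aws_vpc.main"] = &terraform.InstanceDiff{
        Attributes: map[string]*terraform.ResourceAttrDiff{
            "cidr_block": {Old: "", New: "10.0.0.0/16", RequiresNew: true},
        },
    }
    fmt.Println(d.Empty()) // false

    // Prune drops modules that carry no changes.
    d.AddModule([]string{"root", "unused"})
    d.Prune()
    fmt.Println(len(d.Modules)) // 1
}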
+func (d *Diff) Equal(d2 *Diff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Sort the modules + sort.Sort(moduleDiffSort(d.Modules)) + sort.Sort(moduleDiffSort(d2.Modules)) + + // Copy since we have to modify the module destroy flag to false so + // we don't compare that. TODO: delete this when we get rid of the + // destroy flag on modules. + dCopy := d.DeepCopy() + d2Copy := d2.DeepCopy() + for _, m := range dCopy.Modules { + m.Destroy = false + } + for _, m := range d2Copy.Modules { + m.Destroy = false + } + + // Use DeepEqual + return reflect.DeepEqual(dCopy, d2Copy) +} + +// DeepCopy performs a deep copy of all parts of the Diff, making the +// resulting Diff safe to use without modifying this one. +func (d *Diff) DeepCopy() *Diff { + copy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + panic(err) + } + + return copy.(*Diff) +} + +func (d *Diff) String() string { + var buf bytes.Buffer + + keys := make([]string, 0, len(d.Modules)) + lookup := make(map[string]*ModuleDiff) + for _, m := range d.Modules { + key := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], ".")) + keys = append(keys, key) + lookup[key] = m + } + sort.Strings(keys) + + for _, key := range keys { + m := lookup[key] + mStr := m.String() + + // If we're the root module, we just write the output directly. + if reflect.DeepEqual(m.Path, rootModulePath) { + buf.WriteString(mStr + "\n") + continue + } + + buf.WriteString(fmt.Sprintf("%s:\n", key)) + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) + } + } + + return strings.TrimSpace(buf.String()) +} + +func (d *Diff) init() { + if d.Modules == nil { + rootDiff := &ModuleDiff{Path: rootModulePath} + d.Modules = []*ModuleDiff{rootDiff} + } + for _, m := range d.Modules { + m.init() + } +} + +// ModuleDiff tracks the differences between resources to apply within +// a single module. +type ModuleDiff struct { + Path []string + Resources map[string]*InstanceDiff + Destroy bool // Set only by the destroy plan +} + +func (d *ModuleDiff) init() { + if d.Resources == nil { + d.Resources = make(map[string]*InstanceDiff) + } + for _, r := range d.Resources { + r.init() + } +} + +// ChangeType returns the type of changes that the diff for this +// module includes. +// +// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or +// DiffCreate. If an instance within the module has a DiffDestroyCreate +// then this will register as a DiffCreate for a module. +func (d *ModuleDiff) ChangeType() DiffChangeType { + result := DiffNone + for _, r := range d.Resources { + change := r.ChangeType() + switch change { + case DiffCreate, DiffDestroy: + if result == DiffNone { + result = change + } + case DiffDestroyCreate, DiffUpdate: + result = DiffUpdate + } + } + + return result +} + +// Empty returns true if the diff has no changes within this module. +func (d *ModuleDiff) Empty() bool { + if d.Destroy { + return false + } + + if len(d.Resources) == 0 { + return true + } + + for _, rd := range d.Resources { + if !rd.Empty() { + return false + } + } + + return true +} + +// Instances returns the instance diffs for the id given. This can return +// multiple instance diffs if there are counts within the resource. 
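For example, with count-expanded resource keys (illustrative values), the trailing "." in the prefix check keeps a resource that merely shares a name prefix out of the result:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/terraform"
)

func main() {
    changed := &terraform.ResourceAttrDiff{Old: "ami-1", New: "ami-2"}

    md := &terraform.ModuleDiff{
        Resources: map[string]*terraform.InstanceDiff{
            // Two count-expanded instances of the same resource...
            "aws_instance.web.0": {Attributes: map[string]*terraform.ResourceAttrDiff{"ami": changed}},
            "aws_instance.web.1": {Attributes: map[string]*terraform.ResourceAttrDiff{"ami": changed}},
            // ...and a different resource that only shares the prefix.
            "aws_instance.webserver": {Attributes: map[string]*terraform.ResourceAttrDiff{"ami": changed}},
        },
    }

    fmt.Println(len(md.Instances("aws_instance.web"))) // 2
}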
+func (d *ModuleDiff) Instances(id string) []*InstanceDiff { + var result []*InstanceDiff + for k, diff := range d.Resources { + if k == id || strings.HasPrefix(k, id+".") { + if !diff.Empty() { + result = append(result, diff) + } + } + } + + return result +} + +// IsRoot says whether or not this module diff is for the root module. +func (d *ModuleDiff) IsRoot() bool { + return reflect.DeepEqual(d.Path, rootModulePath) +} + +// String outputs the diff in a long but command-line friendly output +// format that users can read to quickly inspect a diff. +func (d *ModuleDiff) String() string { + var buf bytes.Buffer + + names := make([]string, 0, len(d.Resources)) + for name, _ := range d.Resources { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + rdiff := d.Resources[name] + + crud := "UPDATE" + switch { + case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()): + crud = "DESTROY/CREATE" + case rdiff.GetDestroy() || rdiff.GetDestroyDeposed(): + crud = "DESTROY" + case rdiff.RequiresNew(): + crud = "CREATE" + } + + extra := "" + if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() { + extra = " (deposed only)" + } + + buf.WriteString(fmt.Sprintf( + "%s: %s%s\n", + crud, + name, + extra)) + + keyLen := 0 + rdiffAttrs := rdiff.CopyAttributes() + keys := make([]string, 0, len(rdiffAttrs)) + for key, _ := range rdiffAttrs { + if key == "id" { + continue + } + + keys = append(keys, key) + if len(key) > keyLen { + keyLen = len(key) + } + } + sort.Strings(keys) + + for _, attrK := range keys { + attrDiff, _ := rdiff.GetAttribute(attrK) + + v := attrDiff.New + u := attrDiff.Old + if attrDiff.NewComputed { + v = "" + } + + if attrDiff.Sensitive { + u = "" + v = "" + } + + updateMsg := "" + if attrDiff.RequiresNew { + updateMsg = " (forces new resource)" + } else if attrDiff.Sensitive { + updateMsg = " (attribute changed)" + } + + buf.WriteString(fmt.Sprintf( + " %s:%s %#v => %#v%s\n", + attrK, + strings.Repeat(" ", keyLen-len(attrK)), + u, + v, + updateMsg)) + } + } + + return buf.String() +} + +// InstanceDiff is the diff of a resource from some state to another. +type InstanceDiff struct { + mu sync.Mutex + Attributes map[string]*ResourceAttrDiff + Destroy bool + DestroyDeposed bool + DestroyTainted bool + + // Meta is a simple K/V map that is stored in a diff and persisted to + // plans but otherwise is completely ignored by Terraform core. It is + // mean to be used for additional data a resource may want to pass through. + // The value here must only contain Go primitives and collections. + Meta map[string]interface{} +} + +func (d *InstanceDiff) Lock() { d.mu.Lock() } +func (d *InstanceDiff) Unlock() { d.mu.Unlock() } + +// ResourceAttrDiff is the diff of a single attribute of a resource. 
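Attribute keys in these maps use the flatmap convention: ".#" holds a list's element count and ".%" a map's, with elements flattened underneath. The Same comparison defined further below leans on this when a collection is computed at plan time and only materializes during apply. A sketch with made-up values:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/terraform"
)

func main() {
    // Plan time: the number of security groups is not yet known.
    plan := &terraform.InstanceDiff{
        Attributes: map[string]*terraform.ResourceAttrDiff{
            "security_groups.#": {Old: "0", NewComputed: true},
        },
    }

    // Apply time: the list has materialized into two concrete elements.
    apply := &terraform.InstanceDiff{
        Attributes: map[string]*terraform.ResourceAttrDiff{
            "security_groups.#": {Old: "0", New: "2"},
            "security_groups.0": {New: "sg-1111"},
            "security_groups.1": {New: "sg-2222"},
        },
    }

    // Same treats the computed ".#" as covering every key it prefixes,
    // so the two diffs count as the same operation.
    same, reason := plan.Same(apply)
    fmt.Println(same, reason) // true
}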
+type ResourceAttrDiff struct { + Old string // Old Value + New string // New Value + NewComputed bool // True if new value is computed (unknown currently) + NewRemoved bool // True if this attribute is being removed + NewExtra interface{} // Extra information for the provider + RequiresNew bool // True if change requires new resource + Sensitive bool // True if the data should not be displayed in UI output + Type DiffAttrType +} + +// Empty returns true if the diff for this attr is neutral +func (d *ResourceAttrDiff) Empty() bool { + return d.Old == d.New && !d.NewComputed && !d.NewRemoved +} + +func (d *ResourceAttrDiff) GoString() string { + return fmt.Sprintf("*%#v", *d) +} + +// DiffAttrType is an enum type that says whether a resource attribute +// diff is an input attribute (comes from the configuration) or an +// output attribute (comes as a result of applying the configuration). An +// example input would be "ami" for AWS and an example output would be +// "private_ip". +type DiffAttrType byte + +const ( + DiffAttrUnknown DiffAttrType = iota + DiffAttrInput + DiffAttrOutput +) + +func (d *InstanceDiff) init() { + if d.Attributes == nil { + d.Attributes = make(map[string]*ResourceAttrDiff) + } +} + +func NewInstanceDiff() *InstanceDiff { + return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} +} + +func (d *InstanceDiff) Copy() (*InstanceDiff, error) { + if d == nil { + return nil, nil + } + + dCopy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + return nil, err + } + + return dCopy.(*InstanceDiff), nil +} + +// ChangeType returns the DiffChangeType represented by the diff +// for this single instance. +func (d *InstanceDiff) ChangeType() DiffChangeType { + if d.Empty() { + return DiffNone + } + + if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { + return DiffDestroyCreate + } + + if d.GetDestroy() || d.GetDestroyDeposed() { + return DiffDestroy + } + + if d.RequiresNew() { + return DiffCreate + } + + return DiffUpdate +} + +// Empty returns true if this diff encapsulates no changes. +func (d *InstanceDiff) Empty() bool { + if d == nil { + return true + } + + d.mu.Lock() + defer d.mu.Unlock() + return !d.Destroy && + !d.DestroyTainted && + !d.DestroyDeposed && + len(d.Attributes) == 0 +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. +func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Use DeepEqual + return reflect.DeepEqual(d, d2) +} + +// DeepCopy performs a deep copy of all parts of the InstanceDiff +func (d *InstanceDiff) DeepCopy() *InstanceDiff { + copy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + panic(err) + } + + return copy.(*InstanceDiff) +} + +func (d *InstanceDiff) GoString() string { + return fmt.Sprintf("*%#v", InstanceDiff{ + Attributes: d.Attributes, + Destroy: d.Destroy, + DestroyTainted: d.DestroyTainted, + DestroyDeposed: d.DestroyDeposed, + }) +} + +// RequiresNew returns true if the diff requires the creation of a new +// resource (implying the destruction of the old). 
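A small sketch of how a single ForceNew attribute drives the instance-level answers; the attribute name and values are arbitrary:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/terraform"
)

func main() {
    d := terraform.NewInstanceDiff()

    // One ForceNew attribute change makes the whole instance require
    // replacement.
    d.SetAttribute("ami", &terraform.ResourceAttrDiff{
        Old: "ami-1a2b3c", New: "ami-4d5e6f", RequiresNew: true,
    })
    fmt.Println(d.RequiresNew())                         // true
    fmt.Println(d.ChangeType() == terraform.DiffCreate)  // true

    // Combined with a destroy, the plan becomes destroy-then-create.
    d.SetDestroy(true)
    fmt.Println(d.ChangeType() == terraform.DiffDestroyCreate) // true
}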
+func (d *InstanceDiff) RequiresNew() bool { + if d == nil { + return false + } + + d.mu.Lock() + defer d.mu.Unlock() + + return d.requiresNew() +} + +func (d *InstanceDiff) requiresNew() bool { + if d == nil { + return false + } + + if d.DestroyTainted { + return true + } + + for _, rd := range d.Attributes { + if rd != nil && rd.RequiresNew { + return true + } + } + + return false +} + +func (d *InstanceDiff) GetDestroyDeposed() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyDeposed +} + +func (d *InstanceDiff) SetDestroyDeposed(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.DestroyDeposed = b +} + +// These methods are properly locked, for use outside other InstanceDiff +// methods but everywhere else within in the terraform package. +// TODO refactor the locking scheme +func (d *InstanceDiff) SetTainted(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.DestroyTainted = b +} + +func (d *InstanceDiff) GetDestroyTainted() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyTainted +} + +func (d *InstanceDiff) SetDestroy(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Destroy = b +} + +func (d *InstanceDiff) GetDestroy() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.Destroy +} + +func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Attributes[key] = attr +} + +func (d *InstanceDiff) DelAttribute(key string) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.Attributes, key) +} + +func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) { + d.mu.Lock() + defer d.mu.Unlock() + + attr, ok := d.Attributes[key] + return attr, ok +} +func (d *InstanceDiff) GetAttributesLen() int { + d.mu.Lock() + defer d.mu.Unlock() + + return len(d.Attributes) +} + +// Safely copies the Attributes map +func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff { + d.mu.Lock() + defer d.mu.Unlock() + + attrs := make(map[string]*ResourceAttrDiff) + for k, v := range d.Attributes { + attrs[k] = v + } + + return attrs +} + +// Same checks whether or not two InstanceDiff's are the "same". When +// we say "same", it is not necessarily exactly equal. Instead, it is +// just checking that the same attributes are changing, a destroy +// isn't suddenly happening, etc. +func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { + // we can safely compare the pointers without a lock + switch { + case d == nil && d2 == nil: + return true, "" + case d == nil || d2 == nil: + return false, "one nil" + case d == d2: + return true, "" + } + + d.mu.Lock() + defer d.mu.Unlock() + + // If we're going from requiring new to NOT requiring new, then we have + // to see if all required news were computed. If so, it is allowed since + // computed may also mean "same value and therefore not new". + oldNew := d.requiresNew() + newNew := d2.RequiresNew() + if oldNew && !newNew { + oldNew = false + + // This section builds a list of ignorable attributes for requiresNew + // by removing off any elements of collections going to zero elements. + // For collections going to zero, they may not exist at all in the + // new diff (and hence RequiresNew == false). + ignoreAttrs := make(map[string]struct{}) + for k, diffOld := range d.Attributes { + if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") { + continue + } + + // This case is in here as a protection measure. 
+ // The bug that this code originally fixed (GH-11349) didn't have to
+ // deal with computed values, so I'm not 100% sure what the correct
+ // behavior is. Best to leave the old behavior.
+ if diffOld.NewComputed {
+ continue
+ }
+
+ // We're looking for the case where a map goes to exactly 0.
+ if diffOld.New != "0" {
+ continue
+ }
+
+ // Found it! Ignore all of these. The prefix here is stripping
+ // off the "%" so it is just "k."
+ prefix := k[:len(k)-1]
+ for k2, _ := range d.Attributes {
+ if strings.HasPrefix(k2, prefix) {
+ ignoreAttrs[k2] = struct{}{}
+ }
+ }
+ }
+
+ for k, rd := range d.Attributes {
+ if _, ok := ignoreAttrs[k]; ok {
+ continue
+ }
+
+ // If the field requires new and is NOT computed, then what
+ // we have is a diff mismatch for sure. We set that the old
+ // diff does REQUIRE a ForceNew.
+ if rd != nil && rd.RequiresNew && !rd.NewComputed {
+ oldNew = true
+ break
+ }
+ }
+ }
+
+ if oldNew != newNew {
+ return false, fmt.Sprintf(
+ "diff RequiresNew; old: %t, new: %t", oldNew, newNew)
+ }
+
+ // Verify that destroy matches. The second boolean here allows us to
+ // have mismatching Destroy if we're moving from RequiresNew true
+ // to false above. Therefore, the second boolean will only pass if
+ // we're moving from Destroy: true to false as well.
+ if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew {
+ return false, fmt.Sprintf(
+ "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy())
+ }
+
+ // Go through the old diff and make sure the new diff has all the
+ // same attributes. To start, build up the check map to be all the keys.
+ checkOld := make(map[string]struct{})
+ checkNew := make(map[string]struct{})
+ for k, _ := range d.Attributes {
+ checkOld[k] = struct{}{}
+ }
+ for k, _ := range d2.CopyAttributes() {
+ checkNew[k] = struct{}{}
+ }
+
+ // Make an ordered list so we are sure the approximated hashes are left
+ // to process at the end of the loop.
+ keys := make([]string, 0, len(d.Attributes))
+ for k, _ := range d.Attributes {
+ keys = append(keys, k)
+ }
+ sort.StringSlice(keys).Sort()
+
+ for _, k := range keys {
+ diffOld := d.Attributes[k]
+
+ if _, ok := checkOld[k]; !ok {
+ // We're not checking this key for whatever reason (see where
+ // check is modified).
+ continue
+ }
+
+ // Remove this key since we'll never hit it again
+ delete(checkOld, k)
+ delete(checkNew, k)
+
+ _, ok := d2.GetAttribute(k)
+ if !ok {
+ // If there's no new attribute, and the old diff expected the attribute
+ // to be removed, that's just fine.
+ if diffOld.NewRemoved {
+ continue
+ }
+
+ // If the last diff was a computed value then the absence of
+ // that value is allowed since it may mean the value ended up
+ // being the same.
+ if diffOld.NewComputed {
+ ok = true
+ }
+
+ // No exact match, but maybe this is a set containing computed
+ // values. So check if there is an approximate hash in the key
+ // and if so, try to match the key.
+ if strings.Contains(k, "~") {
+ parts := strings.Split(k, ".")
+ parts2 := append([]string(nil), parts...)
+
+ re := regexp.MustCompile(`^~\d+$`)
+ for i, part := range parts {
+ if re.MatchString(part) {
+ // we're going to consider this the base of a
+ // computed hash, and remove all longer matching fields
+ ok = true
+
+ parts2[i] = `\d+`
+ parts2 = parts2[:i+1]
+ break
+ }
+ }
+
+ re, err := regexp.Compile("^" + strings.Join(parts2, `\.`))
+ if err != nil {
+ return false, fmt.Sprintf("regexp failed to compile; err: %#v", err)
+ }
+
+ for k2, _ := range checkNew {
+ if re.MatchString(k2) {
+ delete(checkNew, k2)
+ }
+ }
+ }
+
+ // This is a little tricky, but when a diff contains a computed
+ // list, set, or map that can only be interpolated after the apply
+ // command has created the dependent resources, it could turn out
+ // that the result is actually the same as the existing state which
+ // would remove the key from the diff.
+ if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+ ok = true
+ }
+
+ // Similarly, in a RequiresNew scenario, a list that shows up in the plan
+ // diff can disappear from the apply diff, which is calculated from an
+ // empty state.
+ if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+ ok = true
+ }
+
+ if !ok {
+ return false, fmt.Sprintf("attribute mismatch: %s", k)
+ }
+ }
+
+ // search for the suffix of the base of a [computed] map, list or set.
+ match := multiVal.FindStringSubmatch(k)
+
+ if diffOld.NewComputed && len(match) == 2 {
+ matchLen := len(match[1])
+
+ // This is a computed list, set, or map, so remove any keys with
+ // this prefix from the check list.
+ kprefix := k[:len(k)-matchLen]
+ for k2, _ := range checkOld {
+ if strings.HasPrefix(k2, kprefix) {
+ delete(checkOld, k2)
+ }
+ }
+ for k2, _ := range checkNew {
+ if strings.HasPrefix(k2, kprefix) {
+ delete(checkNew, k2)
+ }
+ }
+ }
+
+ // TODO: check for the same value if not computed
+ }
+
+ // Check for leftover attributes
+ if len(checkNew) > 0 {
+ extras := make([]string, 0, len(checkNew))
+ for attr, _ := range checkNew {
+ extras = append(extras, attr)
+ }
+ return false,
+ fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", "))
+ }
+
+ return true, ""
+}
+
+// moduleDiffSort implements sort.Interface to sort module diffs by path.
+type moduleDiffSort []*ModuleDiff
+
+func (s moduleDiffSort) Len() int { return len(s) }
+func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s moduleDiffSort) Less(i, j int) bool {
+ a := s[i]
+ b := s[j]
+
+ // If the lengths are different, then the shorter one always wins
+ if len(a.Path) != len(b.Path) {
+ return len(a.Path) < len(b.Path)
+ }
+
+ // Otherwise, compare lexically
+ return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
new file mode 100644
index 00000000..3cb088a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go
@@ -0,0 +1,63 @@
+package terraform
+
+import (
+ "log"
+ "strings"
+)
+
+// EvalNode is the interface that must be implemented by graph nodes to
+// evaluate/execute.
+type EvalNode interface {
+ // Eval evaluates this node with the given context.
+ Eval(EvalContext) (interface{}, error)
+}
+
+// GraphNodeEvalable is the interface that graph nodes must implement
+// to enable evaluation.
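A minimal EvalNode implementation makes the contract above concrete. The node below is hypothetical, not part of the vendored code; it sits in package terraform and uses the EvalEarlyExitError sentinel defined next:

// checkEnabled is a hypothetical EvalNode that halts the surrounding
// evaluation tree when its flag is unset.
type checkEnabled struct {
    Enabled *bool
}

func (n *checkEnabled) Eval(ctx EvalContext) (interface{}, error) {
    if n.Enabled != nil && !*n.Enabled {
        // Eval (below) converts this sentinel into a clean, non-error stop.
        return nil, EvalEarlyExitError{}
    }
    return nil, nil
}

Run through Eval, for example with the MockEvalContext that appears later in this patch, the early exit comes back as (nil, nil) rather than as an error.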
+type GraphNodeEvalable interface {
+ EvalTree() EvalNode
+}
+
+// EvalEarlyExitError is a special error return value that can be returned
+// by eval nodes to signal an early exit.
+type EvalEarlyExitError struct{}
+
+func (EvalEarlyExitError) Error() string { return "early exit" }
+
+// Eval evaluates the given EvalNode with the given context, treating an
+// early exit as a normal, non-error result.
+func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
+ // Call the lower level eval which doesn't understand early exit,
+ // and if we early exit, it isn't an error.
+ result, err := EvalRaw(n, ctx)
+ if err != nil {
+ if _, ok := err.(EvalEarlyExitError); ok {
+ return nil, nil
+ }
+ }
+
+ return result, err
+}
+
+// EvalRaw is like Eval except that it returns all errors, even if they
+// signal something normal such as EvalEarlyExitError.
+func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
+ path := "unknown"
+ if ctx != nil {
+ path = strings.Join(ctx.Path(), ".")
+ }
+
+ log.Printf("[DEBUG] %s: eval: %T", path, n)
+ output, err := n.Eval(ctx)
+ if err != nil {
+ if _, ok := err.(EvalEarlyExitError); ok {
+ log.Printf("[DEBUG] %s: eval: %T, err: %s", path, n, err)
+ } else {
+ log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
+ }
+ }
+
+ return output, err
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
new file mode 100644
index 00000000..2f6a4973
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
@@ -0,0 +1,359 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strconv"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalApply is an EvalNode implementation that applies the diff for a
+// resource instance by calling the provider and writes the resulting state.
+type EvalApply struct {
+ Info *InstanceInfo
+ State **InstanceState
+ Diff **InstanceDiff
+ Provider *ResourceProvider
+ Output **InstanceState
+ CreateNew *bool
+ Error *error
+}
+
+// TODO: test
+func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
+ diff := *n.Diff
+ provider := *n.Provider
+ state := *n.State
+
+ // If we have no diff, we have nothing to do!
+ if diff.Empty() {
+ log.Printf(
+ "[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id)
+ return nil, nil
+ }
+
+ // Remove any output values from the diff
+ for k, ad := range diff.CopyAttributes() {
+ if ad.Type == DiffAttrOutput {
+ diff.DelAttribute(k)
+ }
+ }
+
+ // If the state is nil, make it non-nil
+ if state == nil {
+ state = new(InstanceState)
+ }
+ state.init()
+
+ // Flag if we're creating a new instance
+ if n.CreateNew != nil {
+ *n.CreateNew = state.ID == "" && !diff.GetDestroy() || diff.RequiresNew()
+ }
+
+ // With the completed diff, apply!
+ log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id)
+ state, err := provider.Apply(n.Info, state, diff)
+ if state == nil {
+ state = new(InstanceState)
+ }
+ state.init()
+
+ // Force the "id" attribute to be our ID
+ if state.ID != "" {
+ state.Attributes["id"] = state.ID
+ }
+
+ // If the value is the unknown variable value, then it is an error.
+ // In this case we record the error and remove it from the state
+ for ak, av := range state.Attributes {
+ if av == config.UnknownVariableValue {
+ err = multierror.Append(err, fmt.Errorf(
+ "Attribute with unknown value: %s", ak))
+ delete(state.Attributes, ak)
+ }
+ }
+
+ // Write the final state
+ if n.Output != nil {
+ *n.Output = state
+ }
+
+ // If there was an error, append it to our accumulated output error
+ // if we have one; otherwise just return it.
+ if err != nil {
+ if n.Error != nil {
+ helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error())
+ *n.Error = multierror.Append(*n.Error, helpfulErr)
+ } else {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalApplyPre is an EvalNode implementation that does the pre-Apply work
+type EvalApplyPre struct {
+ Info *InstanceInfo
+ State **InstanceState
+ Diff **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+ diff := *n.Diff
+
+ // If the state is nil, make it non-nil
+ if state == nil {
+ state = new(InstanceState)
+ }
+ state.init()
+
+ {
+ // Call pre-apply hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreApply(n.Info, state, diff)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalApplyPost is an EvalNode implementation that does the post-Apply work
+type EvalApplyPost struct {
+ Info *InstanceInfo
+ State **InstanceState
+ Error *error
+}
+
+// TODO: test
+func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+
+ {
+ // Call post-apply hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostApply(n.Info, state, *n.Error)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, *n.Error
+}
+
+// EvalApplyProvisioners is an EvalNode implementation that executes
+// the provisioners for a resource.
+//
+// TODO(mitchellh): This should probably be split up into a more fine-grained
+// ApplyProvisioner (single) that is looped over.
+type EvalApplyProvisioners struct {
+ Info *InstanceInfo
+ State **InstanceState
+ Resource *config.Resource
+ InterpResource *Resource
+ CreateNew *bool
+ Error *error
+
+ // When is the type of provisioner to run at this point
+ When config.ProvisionerWhen
+}
+
+// TODO: test
+func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+
+ if n.CreateNew != nil && !*n.CreateNew {
+ // If we're not creating a new resource, then don't run provisioners
+ return nil, nil
+ }
+
+ provs := n.filterProvisioners()
+ if len(provs) == 0 {
+ // We have no provisioners, so don't do anything
+ return nil, nil
+ }
+
+ // taint tells us whether to enable tainting.
+ taint := n.When == config.ProvisionerWhenCreate
+
+ if n.Error != nil && *n.Error != nil {
+ if taint {
+ state.Tainted = true
+ }
+
+ // We already have an error, so just return out
+ return nil, nil
+ }
+
+ {
+ // Call pre hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreProvisionResource(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // If there was an error, append it to our accumulated output error
+ // if we have one; otherwise just return it.
+ err := n.apply(ctx, provs) + if err != nil { + if taint { + state.Tainted = true + } + + if n.Error != nil { + *n.Error = multierror.Append(*n.Error, err) + } else { + return nil, err + } + } + + { + // Call post hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostProvisionResource(n.Info, state) + }) + if err != nil { + return nil, err + } + } + + return nil, nil +} + +// filterProvisioners filters the provisioners on the resource to only +// the provisioners specified by the "when" option. +func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner { + // Fast path the zero case + if n.Resource == nil { + return nil + } + + if len(n.Resource.Provisioners) == 0 { + return nil + } + + result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners)) + for _, p := range n.Resource.Provisioners { + if p.When == n.When { + result = append(result, p) + } + } + + return result +} + +func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error { + state := *n.State + + // Store the original connection info, restore later + origConnInfo := state.Ephemeral.ConnInfo + defer func() { + state.Ephemeral.ConnInfo = origConnInfo + }() + + for _, prov := range provs { + // Get the provisioner + provisioner := ctx.Provisioner(prov.Type) + + // Interpolate the provisioner config + provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource) + if err != nil { + return err + } + + // Interpolate the conn info, since it may contain variables + connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource) + if err != nil { + return err + } + + // Merge the connection information + overlay := make(map[string]string) + if origConnInfo != nil { + for k, v := range origConnInfo { + overlay[k] = v + } + } + for k, v := range connInfo.Config { + switch vt := v.(type) { + case string: + overlay[k] = vt + case int64: + overlay[k] = strconv.FormatInt(vt, 10) + case int32: + overlay[k] = strconv.FormatInt(int64(vt), 10) + case int: + overlay[k] = strconv.FormatInt(int64(vt), 10) + case float32: + overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32) + case float64: + overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64) + case bool: + overlay[k] = strconv.FormatBool(vt) + default: + overlay[k] = fmt.Sprintf("%v", vt) + } + } + state.Ephemeral.ConnInfo = overlay + + { + // Call pre hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreProvision(n.Info, prov.Type) + }) + if err != nil { + return err + } + } + + // The output function + outputFn := func(msg string) { + ctx.Hook(func(h Hook) (HookAction, error) { + h.ProvisionOutput(n.Info, prov.Type, msg) + return HookActionContinue, nil + }) + } + + // Invoke the Provisioner + output := CallbackUIOutput{OutputFn: outputFn} + applyErr := provisioner.Apply(&output, state, provConfig) + + // Call post hook + hookErr := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostProvision(n.Info, prov.Type, applyErr) + }) + + // Handle the error before we deal with the hook + if applyErr != nil { + // Determine failure behavior + switch prov.OnFailure { + case config.ProvisionerOnFailureContinue: + log.Printf( + "[INFO] apply: %s [%s]: error during provision, continue requested", + n.Info.Id, prov.Type) + + case config.ProvisionerOnFailureFail: + return applyErr + } + } + + // Deal with the hook + if hookErr != nil { + return hookErr + } + } + + return nil + +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go 
b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go new file mode 100644 index 00000000..715e79e1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go @@ -0,0 +1,38 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// EvalPreventDestroy is an EvalNode implementation that returns an +// error if a resource has PreventDestroy configured and the diff +// would destroy the resource. +type EvalCheckPreventDestroy struct { + Resource *config.Resource + ResourceId string + Diff **InstanceDiff +} + +func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) { + if n.Diff == nil || *n.Diff == nil || n.Resource == nil { + return nil, nil + } + + diff := *n.Diff + preventDestroy := n.Resource.Lifecycle.PreventDestroy + + if diff.GetDestroy() && preventDestroy { + resourceId := n.ResourceId + if resourceId == "" { + resourceId = n.Resource.Id() + } + + return nil, fmt.Errorf(preventDestroyErrStr, resourceId) + } + + return nil, nil +} + +const preventDestroyErrStr = `%s: the plan would destroy this resource, but it currently has lifecycle.prevent_destroy set to true. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or adjust the scope of the plan using the -target flag.` diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go new file mode 100644 index 00000000..a1f815b7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go @@ -0,0 +1,84 @@ +package terraform + +import ( + "sync" + + "github.com/hashicorp/terraform/config" +) + +// EvalContext is the interface that is given to eval nodes to execute. +type EvalContext interface { + // Stopped returns a channel that is closed when evaluation is stopped + // via Terraform.Context.Stop() + Stopped() <-chan struct{} + + // Path is the current module path. + Path() []string + + // Hook is used to call hook methods. The callback is called for each + // hook and should return the hook action to take and the error. + Hook(func(Hook) (HookAction, error)) error + + // Input is the UIInput object for interacting with the UI. + Input() UIInput + + // InitProvider initializes the provider with the given name and + // returns the implementation of the resource provider or an error. + // + // It is an error to initialize the same provider more than once. + InitProvider(string) (ResourceProvider, error) + + // Provider gets the provider instance with the given name (already + // initialized) or returns nil if the provider isn't initialized. + Provider(string) ResourceProvider + + // CloseProvider closes provider connections that aren't needed anymore. + CloseProvider(string) error + + // ConfigureProvider configures the provider with the given + // configuration. This is a separate context call because this call + // is used to store the provider configuration for inheritance lookups + // with ParentProviderConfig(). + ConfigureProvider(string, *ResourceConfig) error + SetProviderConfig(string, *ResourceConfig) error + ParentProviderConfig(string) *ResourceConfig + + // ProviderInput and SetProviderInput are used to configure providers + // from user input. 
+ ProviderInput(string) map[string]interface{} + SetProviderInput(string, map[string]interface{}) + + // InitProvisioner initializes the provisioner with the given name and + // returns the implementation of the resource provisioner or an error. + // + // It is an error to initialize the same provisioner more than once. + InitProvisioner(string) (ResourceProvisioner, error) + + // Provisioner gets the provisioner instance with the given name (already + // initialized) or returns nil if the provisioner isn't initialized. + Provisioner(string) ResourceProvisioner + + // CloseProvisioner closes provisioner connections that aren't needed + // anymore. + CloseProvisioner(string) error + + // Interpolate takes the given raw configuration and completes + // the interpolations, returning the processed ResourceConfig. + // + // The resource argument is optional. If given, it is the resource + // that is currently being acted upon. + Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error) + + // SetVariables sets the variables for the module within + // this context with the name n. This function call is additive: + // the second parameter is merged with any previous call. + SetVariables(string, map[string]interface{}) + + // Diff returns the global diff as well as the lock that should + // be used to modify that diff. + Diff() (*Diff, *sync.RWMutex) + + // State returns the global state as well as the lock that should + // be used to modify that state. + State() (*State, *sync.RWMutex) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go new file mode 100644 index 00000000..3dcfb227 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go @@ -0,0 +1,347 @@ +package terraform + +import ( + "context" + "fmt" + "log" + "strings" + "sync" + + "github.com/hashicorp/terraform/config" +) + +// BuiltinEvalContext is an EvalContext implementation that is used by +// Terraform by default. +type BuiltinEvalContext struct { + // StopContext is the context used to track whether we're complete + StopContext context.Context + + // PathValue is the Path that this context is operating within. + PathValue []string + + // Interpolater setting below affect the interpolation of variables. + // + // The InterpolaterVars are the exact value for ${var.foo} values. + // The map is shared between all contexts and is a mapping of + // PATH to KEY to VALUE. Because it is shared by all contexts as well + // as the Interpolater itself, it is protected by InterpolaterVarLock + // which must be locked during any access to the map. + Interpolater *Interpolater + InterpolaterVars map[string]map[string]interface{} + InterpolaterVarLock *sync.Mutex + + Components contextComponentFactory + Hooks []Hook + InputValue UIInput + ProviderCache map[string]ResourceProvider + ProviderConfigCache map[string]*ResourceConfig + ProviderInputConfig map[string]map[string]interface{} + ProviderLock *sync.Mutex + ProvisionerCache map[string]ResourceProvisioner + ProvisionerLock *sync.Mutex + DiffValue *Diff + DiffLock *sync.RWMutex + StateValue *State + StateLock *sync.RWMutex + + once sync.Once +} + +func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { + // This can happen during tests. During tests, we just block forever. 
+ if ctx.StopContext == nil { + return nil + } + + return ctx.StopContext.Done() +} + +func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error { + for _, h := range ctx.Hooks { + action, err := fn(h) + if err != nil { + return err + } + + switch action { + case HookActionContinue: + continue + case HookActionHalt: + // Return an early exit error to trigger an early exit + log.Printf("[WARN] Early exit triggered by hook: %T", h) + return EvalEarlyExitError{} + } + } + + return nil +} + +func (ctx *BuiltinEvalContext) Input() UIInput { + return ctx.InputValue +} + +func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) { + ctx.once.Do(ctx.init) + + // If we already initialized, it is an error + if p := ctx.Provider(n); p != nil { + return nil, fmt.Errorf("Provider '%s' already initialized", n) + } + + // Warning: make sure to acquire these locks AFTER the call to Provider + // above, since it also acquires locks. + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + providerPath := make([]string, len(ctx.Path())+1) + copy(providerPath, ctx.Path()) + providerPath[len(providerPath)-1] = n + key := PathCacheKey(providerPath) + + typeName := strings.SplitN(n, ".", 2)[0] + p, err := ctx.Components.ResourceProvider(typeName, key) + if err != nil { + return nil, err + } + + ctx.ProviderCache[key] = p + return p, nil +} + +func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider { + ctx.once.Do(ctx.init) + + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + providerPath := make([]string, len(ctx.Path())+1) + copy(providerPath, ctx.Path()) + providerPath[len(providerPath)-1] = n + + return ctx.ProviderCache[PathCacheKey(providerPath)] +} + +func (ctx *BuiltinEvalContext) CloseProvider(n string) error { + ctx.once.Do(ctx.init) + + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + providerPath := make([]string, len(ctx.Path())+1) + copy(providerPath, ctx.Path()) + providerPath[len(providerPath)-1] = n + + var provider interface{} + provider = ctx.ProviderCache[PathCacheKey(providerPath)] + if provider != nil { + if p, ok := provider.(ResourceProviderCloser); ok { + delete(ctx.ProviderCache, PathCacheKey(providerPath)) + return p.Close() + } + } + + return nil +} + +func (ctx *BuiltinEvalContext) ConfigureProvider( + n string, cfg *ResourceConfig) error { + p := ctx.Provider(n) + if p == nil { + return fmt.Errorf("Provider '%s' not initialized", n) + } + + if err := ctx.SetProviderConfig(n, cfg); err != nil { + return nil + } + + return p.Configure(cfg) +} + +func (ctx *BuiltinEvalContext) SetProviderConfig( + n string, cfg *ResourceConfig) error { + providerPath := make([]string, len(ctx.Path())+1) + copy(providerPath, ctx.Path()) + providerPath[len(providerPath)-1] = n + + // Save the configuration + ctx.ProviderLock.Lock() + ctx.ProviderConfigCache[PathCacheKey(providerPath)] = cfg + ctx.ProviderLock.Unlock() + + return nil +} + +func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} { + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + // Make a copy of the path so we can safely edit it + path := ctx.Path() + pathCopy := make([]string, len(path)+1) + copy(pathCopy, path) + + // Go up the tree. 
+ for i := len(path) - 1; i >= 0; i-- { + pathCopy[i+1] = n + k := PathCacheKey(pathCopy[:i+2]) + if v, ok := ctx.ProviderInputConfig[k]; ok { + return v + } + } + + return nil +} + +func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) { + providerPath := make([]string, len(ctx.Path())+1) + copy(providerPath, ctx.Path()) + providerPath[len(providerPath)-1] = n + + // Save the configuration + ctx.ProviderLock.Lock() + ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c + ctx.ProviderLock.Unlock() +} + +func (ctx *BuiltinEvalContext) ParentProviderConfig(n string) *ResourceConfig { + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + // Make a copy of the path so we can safely edit it + path := ctx.Path() + pathCopy := make([]string, len(path)+1) + copy(pathCopy, path) + + // Go up the tree. + for i := len(path) - 1; i >= 0; i-- { + pathCopy[i+1] = n + k := PathCacheKey(pathCopy[:i+2]) + if v, ok := ctx.ProviderConfigCache[k]; ok { + return v + } + } + + return nil +} + +func (ctx *BuiltinEvalContext) InitProvisioner( + n string) (ResourceProvisioner, error) { + ctx.once.Do(ctx.init) + + // If we already initialized, it is an error + if p := ctx.Provisioner(n); p != nil { + return nil, fmt.Errorf("Provisioner '%s' already initialized", n) + } + + // Warning: make sure to acquire these locks AFTER the call to Provisioner + // above, since it also acquires locks. + ctx.ProvisionerLock.Lock() + defer ctx.ProvisionerLock.Unlock() + + provPath := make([]string, len(ctx.Path())+1) + copy(provPath, ctx.Path()) + provPath[len(provPath)-1] = n + key := PathCacheKey(provPath) + + p, err := ctx.Components.ResourceProvisioner(n, key) + if err != nil { + return nil, err + } + + ctx.ProvisionerCache[key] = p + return p, nil +} + +func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner { + ctx.once.Do(ctx.init) + + ctx.ProvisionerLock.Lock() + defer ctx.ProvisionerLock.Unlock() + + provPath := make([]string, len(ctx.Path())+1) + copy(provPath, ctx.Path()) + provPath[len(provPath)-1] = n + + return ctx.ProvisionerCache[PathCacheKey(provPath)] +} + +func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { + ctx.once.Do(ctx.init) + + ctx.ProvisionerLock.Lock() + defer ctx.ProvisionerLock.Unlock() + + provPath := make([]string, len(ctx.Path())+1) + copy(provPath, ctx.Path()) + provPath[len(provPath)-1] = n + + var prov interface{} + prov = ctx.ProvisionerCache[PathCacheKey(provPath)] + if prov != nil { + if p, ok := prov.(ResourceProvisionerCloser); ok { + delete(ctx.ProvisionerCache, PathCacheKey(provPath)) + return p.Close() + } + } + + return nil +} + +func (ctx *BuiltinEvalContext) Interpolate( + cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) { + if cfg != nil { + scope := &InterpolationScope{ + Path: ctx.Path(), + Resource: r, + } + + vs, err := ctx.Interpolater.Values(scope, cfg.Variables) + if err != nil { + return nil, err + } + + // Do the interpolation + if err := cfg.Interpolate(vs); err != nil { + return nil, err + } + } + + result := NewResourceConfig(cfg) + result.interpolateForce() + return result, nil +} + +func (ctx *BuiltinEvalContext) Path() []string { + return ctx.PathValue +} + +func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) { + ctx.InterpolaterVarLock.Lock() + defer ctx.InterpolaterVarLock.Unlock() + + path := make([]string, len(ctx.Path())+1) + copy(path, ctx.Path()) + path[len(path)-1] = n + key := PathCacheKey(path) + + vars := ctx.InterpolaterVars[key] + if vars == 
nil { + vars = make(map[string]interface{}) + ctx.InterpolaterVars[key] = vars + } + + for k, v := range vs { + vars[k] = v + } +} + +func (ctx *BuiltinEvalContext) Diff() (*Diff, *sync.RWMutex) { + return ctx.DiffValue, ctx.DiffLock +} + +func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) { + return ctx.StateValue, ctx.StateLock +} + +func (ctx *BuiltinEvalContext) init() { +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go new file mode 100644 index 00000000..4f90d5b1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go @@ -0,0 +1,208 @@ +package terraform + +import ( + "sync" + + "github.com/hashicorp/terraform/config" +) + +// MockEvalContext is a mock version of EvalContext that can be used +// for tests. +type MockEvalContext struct { + StoppedCalled bool + StoppedValue <-chan struct{} + + HookCalled bool + HookHook Hook + HookError error + + InputCalled bool + InputInput UIInput + + InitProviderCalled bool + InitProviderName string + InitProviderProvider ResourceProvider + InitProviderError error + + ProviderCalled bool + ProviderName string + ProviderProvider ResourceProvider + + CloseProviderCalled bool + CloseProviderName string + CloseProviderProvider ResourceProvider + + ProviderInputCalled bool + ProviderInputName string + ProviderInputConfig map[string]interface{} + + SetProviderInputCalled bool + SetProviderInputName string + SetProviderInputConfig map[string]interface{} + + ConfigureProviderCalled bool + ConfigureProviderName string + ConfigureProviderConfig *ResourceConfig + ConfigureProviderError error + + SetProviderConfigCalled bool + SetProviderConfigName string + SetProviderConfigConfig *ResourceConfig + + ParentProviderConfigCalled bool + ParentProviderConfigName string + ParentProviderConfigConfig *ResourceConfig + + InitProvisionerCalled bool + InitProvisionerName string + InitProvisionerProvisioner ResourceProvisioner + InitProvisionerError error + + ProvisionerCalled bool + ProvisionerName string + ProvisionerProvisioner ResourceProvisioner + + CloseProvisionerCalled bool + CloseProvisionerName string + CloseProvisionerProvisioner ResourceProvisioner + + InterpolateCalled bool + InterpolateConfig *config.RawConfig + InterpolateResource *Resource + InterpolateConfigResult *ResourceConfig + InterpolateError error + + PathCalled bool + PathPath []string + + SetVariablesCalled bool + SetVariablesModule string + SetVariablesVariables map[string]interface{} + + DiffCalled bool + DiffDiff *Diff + DiffLock *sync.RWMutex + + StateCalled bool + StateState *State + StateLock *sync.RWMutex +} + +func (c *MockEvalContext) Stopped() <-chan struct{} { + c.StoppedCalled = true + return c.StoppedValue +} + +func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error { + c.HookCalled = true + if c.HookHook != nil { + if _, err := fn(c.HookHook); err != nil { + return err + } + } + + return c.HookError +} + +func (c *MockEvalContext) Input() UIInput { + c.InputCalled = true + return c.InputInput +} + +func (c *MockEvalContext) InitProvider(n string) (ResourceProvider, error) { + c.InitProviderCalled = true + c.InitProviderName = n + return c.InitProviderProvider, c.InitProviderError +} + +func (c *MockEvalContext) Provider(n string) ResourceProvider { + c.ProviderCalled = true + c.ProviderName = n + return c.ProviderProvider +} + +func (c *MockEvalContext) CloseProvider(n string) error { + c.CloseProviderCalled = true + 
c.CloseProviderName = n + return nil +} + +func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error { + c.ConfigureProviderCalled = true + c.ConfigureProviderName = n + c.ConfigureProviderConfig = cfg + return c.ConfigureProviderError +} + +func (c *MockEvalContext) SetProviderConfig( + n string, cfg *ResourceConfig) error { + c.SetProviderConfigCalled = true + c.SetProviderConfigName = n + c.SetProviderConfigConfig = cfg + return nil +} + +func (c *MockEvalContext) ParentProviderConfig(n string) *ResourceConfig { + c.ParentProviderConfigCalled = true + c.ParentProviderConfigName = n + return c.ParentProviderConfigConfig +} + +func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} { + c.ProviderInputCalled = true + c.ProviderInputName = n + return c.ProviderInputConfig +} + +func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) { + c.SetProviderInputCalled = true + c.SetProviderInputName = n + c.SetProviderInputConfig = cfg +} + +func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) { + c.InitProvisionerCalled = true + c.InitProvisionerName = n + return c.InitProvisionerProvisioner, c.InitProvisionerError +} + +func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner { + c.ProvisionerCalled = true + c.ProvisionerName = n + return c.ProvisionerProvisioner +} + +func (c *MockEvalContext) CloseProvisioner(n string) error { + c.CloseProvisionerCalled = true + c.CloseProvisionerName = n + return nil +} + +func (c *MockEvalContext) Interpolate( + config *config.RawConfig, resource *Resource) (*ResourceConfig, error) { + c.InterpolateCalled = true + c.InterpolateConfig = config + c.InterpolateResource = resource + return c.InterpolateConfigResult, c.InterpolateError +} + +func (c *MockEvalContext) Path() []string { + c.PathCalled = true + return c.PathPath +} + +func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) { + c.SetVariablesCalled = true + c.SetVariablesModule = n + c.SetVariablesVariables = vs +} + +func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) { + c.DiffCalled = true + return c.DiffDiff, c.DiffLock +} + +func (c *MockEvalContext) State() (*State, *sync.RWMutex) { + c.StateCalled = true + return c.StateState, c.StateLock +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go new file mode 100644 index 00000000..2ae56a75 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go @@ -0,0 +1,58 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config" +) + +// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state +// when there is a resource count with zero/one boundary, i.e. fixing +// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. +type EvalCountFixZeroOneBoundary struct { + Resource *config.Resource +} + +// TODO: test +func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) { + // Get the count, important for knowing whether we're supposed to + // be adding the zero, or trimming it. + count, err := n.Resource.Count() + if err != nil { + return nil, err + } + + // Figure what to look for and what to replace it with + hunt := n.Resource.Id() + replace := hunt + ".0" + if count < 2 { + hunt, replace = replace, hunt + } + + state, lock := ctx.State() + + // Get a lock so we can access this instance and potentially make + // changes to it. 
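+	// A full write lock is required (rather than a read lock) because the
+	// zero/one boundary fix below may rename a key in mod.Resources.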
+	lock.Lock()
+	defer lock.Unlock()
+
+	// Look for the module state. If we don't have one, then it doesn't matter.
+	mod := state.ModuleByPath(ctx.Path())
+	if mod == nil {
+		return nil, nil
+	}
+
+	// Look for the resource state. If we don't have one, then it is okay.
+	rs, ok := mod.Resources[hunt]
+	if !ok {
+		return nil, nil
+	}
+
+	// If the replacement key exists, we just keep both
+	if _, ok := mod.Resources[replace]; ok {
+		return nil, nil
+	}
+
+	mod.Resources[replace] = rs
+	delete(mod.Resources, hunt)
+
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
new file mode 100644
index 00000000..6f09526a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
@@ -0,0 +1,478 @@
+package terraform
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/hashicorp/terraform/config"
+)
+
+// EvalCompareDiff is an EvalNode implementation that compares two diffs
+// and errors if the diffs are not equal.
+type EvalCompareDiff struct {
+	Info     *InstanceInfo
+	One, Two **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
+	one, two := *n.One, *n.Two
+
+	// If either are nil, let them be empty
+	if one == nil {
+		one = new(InstanceDiff)
+		one.init()
+	}
+	if two == nil {
+		two = new(InstanceDiff)
+		two.init()
+	}
+	oneId, _ := one.GetAttribute("id")
+	twoId, _ := two.GetAttribute("id")
+	one.DelAttribute("id")
+	two.DelAttribute("id")
+	defer func() {
+		if oneId != nil {
+			one.SetAttribute("id", oneId)
+		}
+		if twoId != nil {
+			two.SetAttribute("id", twoId)
+		}
+	}()
+
+	if same, reason := one.Same(two); !same {
+		log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id)
+		log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason)
+		log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one)
+		log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two)
+		return nil, fmt.Errorf(
+			"%s: diffs didn't match during apply. This is a bug with "+
+				"Terraform and should be reported as a GitHub Issue.\n"+
+				"\n"+
+				"Please include the following information in your report:\n"+
+				"\n"+
+				"    Terraform Version: %s\n"+
+				"    Resource ID: %s\n"+
+				"    Mismatch reason: %s\n"+
+				"    Diff One (usually from plan): %#v\n"+
+				"    Diff Two (usually from apply): %#v\n"+
+				"\n"+
+				"Also include as much context as you can about your config, state, "+
+				"and the steps you performed to trigger this error.\n",
+			n.Info.Id, Version, n.Info.Id, reason, one, two)
+	}
+
+	return nil, nil
+}
+
+// EvalDiff is an EvalNode implementation that computes the diff for
+// a resource.
+type EvalDiff struct {
+	Name        string
+	Info        *InstanceInfo
+	Config      **ResourceConfig
+	Provider    *ResourceProvider
+	Diff        **InstanceDiff
+	State       **InstanceState
+	OutputDiff  **InstanceDiff
+	OutputState **InstanceState
+
+	// Resource is needed to fetch the ignore_changes list so we can
+	// filter user-requested ignored attributes from the diff.
+	Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
+	state := *n.State
+	config := *n.Config
+	provider := *n.Provider
+
+	// Call pre-diff hook
+	err := ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PreDiff(n.Info, state)
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// The state for the diff must never be nil
+	diffState := state
+	if diffState == nil {
+		diffState = new(InstanceState)
+	}
+	diffState.init()
+
+	// Diff!
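+	// The provider compares the prior state against the desired
+	// configuration and returns the attribute-level changes.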
+	diff, err := provider.Diff(n.Info, diffState, config)
+	if err != nil {
+		return nil, err
+	}
+	if diff == nil {
+		diff = new(InstanceDiff)
+	}
+
+	// Set DestroyDeposed if we have deposed instances
+	_, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) {
+		if len(rs.Deposed) > 0 {
+			diff.DestroyDeposed = true
+		}
+
+		return nil, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Preserve the DestroyTainted flag
+	if n.Diff != nil {
+		diff.SetTainted((*n.Diff).GetDestroyTainted())
+	}
+
+	// Require a destroy if there is an ID and it requires new.
+	if diff.RequiresNew() && state != nil && state.ID != "" {
+		diff.SetDestroy(true)
+	}
+
+	// If we're creating a new resource, compute its ID
+	if diff.RequiresNew() || state == nil || state.ID == "" {
+		var oldID string
+		if state != nil {
+			oldID = state.Attributes["id"]
+		}
+
+		// Add diff to compute new ID
+		diff.init()
+		diff.SetAttribute("id", &ResourceAttrDiff{
+			Old:         oldID,
+			NewComputed: true,
+			RequiresNew: true,
+			Type:        DiffAttrOutput,
+		})
+	}
+
+	// filter out ignored attributes
+	if err := n.processIgnoreChanges(diff); err != nil {
+		return nil, err
+	}
+
+	// Call post-refresh hook
+	err = ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PostDiff(n.Info, diff)
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Update our output
+	*n.OutputDiff = diff
+
+	// Update the state if we care
+	if n.OutputState != nil {
+		*n.OutputState = state
+
+		// Merge our state so that the state is updated with our plan
+		if !diff.Empty() && n.OutputState != nil {
+			*n.OutputState = state.MergeDiff(diff)
+		}
+	}
+
+	return nil, nil
+}
+
+func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
+	if diff == nil || n.Resource == nil || n.Resource.Id() == "" {
+		return nil
+	}
+	ignoreChanges := n.Resource.Lifecycle.IgnoreChanges
+
+	if len(ignoreChanges) == 0 {
+		return nil
+	}
+
+	// If we're just creating the resource, we shouldn't alter the
+	// Diff at all
+	if diff.ChangeType() == DiffCreate {
+		return nil
+	}
+
+	// If the resource has been tainted then we don't process ignore changes
+	// since we MUST recreate the entire resource.
+	if diff.GetDestroyTainted() {
+		return nil
+	}
+
+	attrs := diff.CopyAttributes()
+
+	// get the complete set of keys we want to ignore
+	ignorableAttrKeys := make(map[string]bool)
+	for _, ignoredKey := range ignoreChanges {
+		for k := range attrs {
+			if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) {
+				ignorableAttrKeys[k] = true
+			}
+		}
+	}
+
+	// If the resource was being destroyed, check to see if we can ignore the
+	// reason for it being destroyed.
+	if diff.GetDestroy() {
+		for k, v := range attrs {
+			if k == "id" {
+				// id will always be changed if we intended to replace this instance
+				continue
+			}
+			if v.Empty() || v.NewComputed {
+				continue
+			}
+
+			// If any RequiresNew attribute isn't ignored, we need to keep the diff
+			// as-is to be able to replace the resource.
+			if v.RequiresNew && !ignorableAttrKeys[k] {
+				return nil
+			}
+		}
+
+		// Now that we know that we aren't replacing the instance, we can filter
+		// out all the empty and computed attributes. There may be a bunch of
+		// extraneous attribute diffs for the other non-requires-new attributes
+		// going from "" -> "configval" or "" -> "<computed>".
+		// We must make sure any flatmapped containers are filtered (or not) as a
+		// whole.
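+		// For example, a list attribute is stored in the flatmap as the
+		// sibling keys "foo.#", "foo.0", "foo.1", and so on; those keys
+		// must all stay in the diff or all be filtered out together, which
+		// is what the container grouping below takes care of.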
+ containers := groupContainers(diff) + keep := map[string]bool{} + for _, v := range containers { + if v.keepDiff() { + // At least one key has changes, so list all the sibling keys + // to keep in the diff. + for k := range v { + keep[k] = true + } + } + } + + for k, v := range attrs { + if (v.Empty() || v.NewComputed) && !keep[k] { + ignorableAttrKeys[k] = true + } + } + } + + // Here we undo the two reactions to RequireNew in EvalDiff - the "id" + // attribute diff and the Destroy boolean field + log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " + + "because after ignore_changes, this diff no longer requires replacement") + diff.DelAttribute("id") + diff.SetDestroy(false) + + // If we didn't hit any of our early exit conditions, we can filter the diff. + for k := range ignorableAttrKeys { + log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s", + n.Resource.Id(), k) + diff.DelAttribute(k) + } + + return nil +} + +// a group of key-*ResourceAttrDiff pairs from the same flatmapped container +type flatAttrDiff map[string]*ResourceAttrDiff + +// we need to keep all keys if any of them have a diff +func (f flatAttrDiff) keepDiff() bool { + for _, v := range f { + if !v.Empty() && !v.NewComputed { + return true + } + } + return false +} + +// sets, lists and maps need to be compared for diff inclusion as a whole, so +// group the flatmapped keys together for easier comparison. +func groupContainers(d *InstanceDiff) map[string]flatAttrDiff { + isIndex := multiVal.MatchString + containers := map[string]flatAttrDiff{} + attrs := d.CopyAttributes() + // we need to loop once to find the index key + for k := range attrs { + if isIndex(k) { + // add the key, always including the final dot to fully qualify it + containers[k[:len(k)-1]] = flatAttrDiff{} + } + } + + // loop again to find all the sub keys + for prefix, values := range containers { + for k, attrDiff := range attrs { + // we include the index value as well, since it could be part of the diff + if strings.HasPrefix(k, prefix) { + values[k] = attrDiff + } + } + } + + return containers +} + +// EvalDiffDestroy is an EvalNode implementation that returns a plain +// destroy diff. +type EvalDiffDestroy struct { + Info *InstanceInfo + State **InstanceState + Output **InstanceDiff +} + +// TODO: test +func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + + // If there is no state or we don't have an ID, we're already destroyed + if state == nil || state.ID == "" { + return nil, nil + } + + // Call pre-diff hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(n.Info, state) + }) + if err != nil { + return nil, err + } + + // The diff + diff := &InstanceDiff{Destroy: true} + + // Call post-diff hook + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(n.Info, diff) + }) + if err != nil { + return nil, err + } + + // Update our output + *n.Output = diff + + return nil, nil +} + +// EvalDiffDestroyModule is an EvalNode implementation that writes the diff to +// the full diff. 
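+//
+// The target module is marked with Destroy = true in the global diff so
+// that later evaluation knows every resource within it is being removed.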
+type EvalDiffDestroyModule struct { + Path []string +} + +// TODO: test +func (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) { + diff, lock := ctx.Diff() + + // Acquire the lock so that we can do this safely concurrently + lock.Lock() + defer lock.Unlock() + + // Write the diff + modDiff := diff.ModuleByPath(n.Path) + if modDiff == nil { + modDiff = diff.AddModule(n.Path) + } + modDiff.Destroy = true + + return nil, nil +} + +// EvalFilterDiff is an EvalNode implementation that filters the diff +// according to some filter. +type EvalFilterDiff struct { + // Input and output + Diff **InstanceDiff + Output **InstanceDiff + + // Destroy, if true, will only include a destroy diff if it is set. + Destroy bool +} + +func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) { + if *n.Diff == nil { + return nil, nil + } + + input := *n.Diff + result := new(InstanceDiff) + + if n.Destroy { + if input.GetDestroy() || input.RequiresNew() { + result.SetDestroy(true) + } + } + + if n.Output != nil { + *n.Output = result + } + + return nil, nil +} + +// EvalReadDiff is an EvalNode implementation that writes the diff to +// the full diff. +type EvalReadDiff struct { + Name string + Diff **InstanceDiff +} + +func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) { + diff, lock := ctx.Diff() + + // Acquire the lock so that we can do this safely concurrently + lock.Lock() + defer lock.Unlock() + + // Write the diff + modDiff := diff.ModuleByPath(ctx.Path()) + if modDiff == nil { + return nil, nil + } + + *n.Diff = modDiff.Resources[n.Name] + + return nil, nil +} + +// EvalWriteDiff is an EvalNode implementation that writes the diff to +// the full diff. +type EvalWriteDiff struct { + Name string + Diff **InstanceDiff +} + +// TODO: test +func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) { + diff, lock := ctx.Diff() + + // The diff to write, if its empty it should write nil + var diffVal *InstanceDiff + if n.Diff != nil { + diffVal = *n.Diff + } + if diffVal.Empty() { + diffVal = nil + } + + // Acquire the lock so that we can do this safely concurrently + lock.Lock() + defer lock.Unlock() + + // Write the diff + modDiff := diff.ModuleByPath(ctx.Path()) + if modDiff == nil { + modDiff = diff.AddModule(ctx.Path()) + } + if diffVal != nil { + modDiff.Resources[n.Name] = diffVal + } else { + delete(modDiff.Resources, n.Name) + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go new file mode 100644 index 00000000..470f798b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go @@ -0,0 +1,20 @@ +package terraform + +// EvalReturnError is an EvalNode implementation that returns an +// error if it is present. +// +// This is useful for scenarios where an error has been captured by +// another EvalNode (like EvalApply) for special EvalTree-based error +// handling, and that handling has completed, so the error should be +// returned normally. 
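+//
+// A typical tree captures an apply failure into a shared *error early on
+// and places an EvalReturnError pointing at the same variable near the
+// end, so that state-persisting nodes still run before the error aborts
+// the walk. A sketch:
+//
+//	var applyErr error
+//	// ... earlier nodes write into &applyErr and persist state ...
+//	node := &EvalReturnError{Error: &applyErr}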
+type EvalReturnError struct {
+	Error *error
+}
+
+func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) {
+	if n.Error == nil {
+		return nil, nil
+	}
+
+	return nil, *n.Error
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
new file mode 100644
index 00000000..711c625c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
@@ -0,0 +1,25 @@
+package terraform
+
+// EvalNodeFilterFunc is the callback used to replace a node with
+// another node. To not do the replacement, just return the input node.
+type EvalNodeFilterFunc func(EvalNode) EvalNode
+
+// EvalNodeFilterable is an interface that can be implemented by
+// EvalNodes to allow filtering of sub-elements. Note that this isn't
+// a common thing to implement and you probably don't need it.
+type EvalNodeFilterable interface {
+	EvalNode
+	Filter(EvalNodeFilterFunc)
+}
+
+// EvalFilter runs the filter on the given node and returns the
+// final filtered value. This should be called rather than checking
+// the EvalNode directly since this will properly handle EvalNodeFilterables.
+func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode {
+	if f, ok := node.(EvalNodeFilterable); ok {
+		f.Filter(fn)
+		return node
+	}
+
+	return fn(node)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
new file mode 100644
index 00000000..1a55f024
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
@@ -0,0 +1,49 @@
+package terraform
+
+// EvalNodeOpFilterable is an interface that EvalNodes can implement
+// to be filterable by the operation that is being run on Terraform.
+type EvalNodeOpFilterable interface {
+	IncludeInOp(walkOperation) bool
+}
+
+// EvalNodeFilterOp returns a filter function that filters nodes that
+// include themselves in specific operations.
+func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc {
+	return func(n EvalNode) EvalNode {
+		include := true
+		if of, ok := n.(EvalNodeOpFilterable); ok {
+			include = of.IncludeInOp(op)
+		}
+		if include {
+			return n
+		}
+
+		return EvalNoop{}
+	}
+}
+
+// EvalOpFilter is an EvalNode implementation that is a proxy to
+// another node but filters based on the operation.
+type EvalOpFilter struct {
+	// Ops is the list of operations to include this node in.
+	Ops []walkOperation
+
+	// Node is the node to execute
+	Node EvalNode
+}
+
+// TODO: test
+func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) {
+	return EvalRaw(n.Node, ctx)
+}
+
+// EvalNodeOpFilterable impl.
+func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool {
+	for _, v := range n.Ops {
+		if v == op {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
new file mode 100644
index 00000000..d6b46a1f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
@@ -0,0 +1,26 @@
+package terraform
+
+// EvalIf is an EvalNode that is a conditional.
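+// If the If func returns true the Then node is evaluated; otherwise the
+// Else node is evaluated when one is set.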
+type EvalIf struct { + If func(EvalContext) (bool, error) + Then EvalNode + Else EvalNode +} + +// TODO: test +func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) { + yes, err := n.If(ctx) + if err != nil { + return nil, err + } + + if yes { + return EvalRaw(n.Then, ctx) + } else { + if n.Else != nil { + return EvalRaw(n.Else, ctx) + } + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go new file mode 100644 index 00000000..62cc581f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go @@ -0,0 +1,76 @@ +package terraform + +import ( + "fmt" +) + +// EvalImportState is an EvalNode implementation that performs an +// ImportState operation on a provider. This will return the imported +// states but won't modify any actual state. +type EvalImportState struct { + Provider *ResourceProvider + Info *InstanceInfo + Id string + Output *[]*InstanceState +} + +// TODO: test +func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { + provider := *n.Provider + + { + // Call pre-import hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreImportState(n.Info, n.Id) + }) + if err != nil { + return nil, err + } + } + + // Import! + state, err := provider.ImportState(n.Info, n.Id) + if err != nil { + return nil, fmt.Errorf( + "import %s (id: %s): %s", n.Info.HumanId(), n.Id, err) + } + + if n.Output != nil { + *n.Output = state + } + + { + // Call post-import hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostImportState(n.Info, state) + }) + if err != nil { + return nil, err + } + } + + return nil, nil +} + +// EvalImportStateVerify verifies the state after ImportState and +// after the refresh to make sure it is non-nil and valid. +type EvalImportStateVerify struct { + Info *InstanceInfo + Id string + State **InstanceState +} + +// TODO: test +func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + if state.Empty() { + return nil, fmt.Errorf( + "import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+ + "exist. Please verify the ID is correct. You cannot import non-existent\n"+ + "resources using Terraform import.", + n.Info.HumanId(), + n.Id) + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go new file mode 100644 index 00000000..6825ff59 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go @@ -0,0 +1,24 @@ +package terraform + +import "github.com/hashicorp/terraform/config" + +// EvalInterpolate is an EvalNode implementation that takes a raw +// configuration and interpolates it. +type EvalInterpolate struct { + Config *config.RawConfig + Resource *Resource + Output **ResourceConfig +} + +func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) { + rc, err := ctx.Interpolate(n.Config, n.Resource) + if err != nil { + return nil, err + } + + if n.Output != nil { + *n.Output = rc + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go new file mode 100644 index 00000000..f4bc8225 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go @@ -0,0 +1,8 @@ +package terraform + +// EvalNoop is an EvalNode that does nothing. 
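+// It is what filters such as EvalNodeFilterOp substitute for nodes that
+// should not run during the current walk operation.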
+type EvalNoop struct{} + +func (EvalNoop) Eval(EvalContext) (interface{}, error) { + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go new file mode 100644 index 00000000..cf61781e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go @@ -0,0 +1,119 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/config" +) + +// EvalDeleteOutput is an EvalNode implementation that deletes an output +// from the state. +type EvalDeleteOutput struct { + Name string +} + +// TODO: test +func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) { + state, lock := ctx.State() + if state == nil { + return nil, nil + } + + // Get a write lock so we can access this instance + lock.Lock() + defer lock.Unlock() + + // Look for the module state. If we don't have one, create it. + mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + return nil, nil + } + + delete(mod.Outputs, n.Name) + + return nil, nil +} + +// EvalWriteOutput is an EvalNode implementation that writes the output +// for the given name to the current state. +type EvalWriteOutput struct { + Name string + Sensitive bool + Value *config.RawConfig +} + +// TODO: test +func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { + cfg, err := ctx.Interpolate(n.Value, nil) + if err != nil { + // Log error but continue anyway + log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err) + } + + state, lock := ctx.State() + if state == nil { + return nil, fmt.Errorf("cannot write state to nil state") + } + + // Get a write lock so we can access this instance + lock.Lock() + defer lock.Unlock() + + // Look for the module state. If we don't have one, create it. + mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + mod = state.AddModule(ctx.Path()) + } + + // Get the value from the config + var valueRaw interface{} = config.UnknownVariableValue + if cfg != nil { + var ok bool + valueRaw, ok = cfg.Get("value") + if !ok { + valueRaw = "" + } + if cfg.IsComputed("value") { + valueRaw = config.UnknownVariableValue + } + } + + switch valueTyped := valueRaw.(type) { + case string: + mod.Outputs[n.Name] = &OutputState{ + Type: "string", + Sensitive: n.Sensitive, + Value: valueTyped, + } + case []interface{}: + mod.Outputs[n.Name] = &OutputState{ + Type: "list", + Sensitive: n.Sensitive, + Value: valueTyped, + } + case map[string]interface{}: + mod.Outputs[n.Name] = &OutputState{ + Type: "map", + Sensitive: n.Sensitive, + Value: valueTyped, + } + case []map[string]interface{}: + // an HCL map is multi-valued, so if this was read out of a config the + // map may still be in a slice. 
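+		// A single-element slice unwraps to the map itself; anything
+		// longer cannot be represented as a map output and is an error.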
+ if len(valueTyped) == 1 { + mod.Outputs[n.Name] = &OutputState{ + Type: "map", + Sensitive: n.Sensitive, + Value: valueTyped[0], + } + break + } + return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map", + n.Name, valueTyped, len(valueTyped)) + default: + return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped) + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go new file mode 100644 index 00000000..092fd18d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go @@ -0,0 +1,164 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// EvalSetProviderConfig sets the parent configuration for a provider +// without configuring that provider, validating it, etc. +type EvalSetProviderConfig struct { + Provider string + Config **ResourceConfig +} + +func (n *EvalSetProviderConfig) Eval(ctx EvalContext) (interface{}, error) { + return nil, ctx.SetProviderConfig(n.Provider, *n.Config) +} + +// EvalBuildProviderConfig outputs a *ResourceConfig that is properly +// merged with parents and inputs on top of what is configured in the file. +type EvalBuildProviderConfig struct { + Provider string + Config **ResourceConfig + Output **ResourceConfig +} + +func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) { + cfg := *n.Config + + // If we have a configuration set, then merge that in + if input := ctx.ProviderInput(n.Provider); input != nil { + // "input" is a map of the subset of config values that were known + // during the input walk, set by EvalInputProvider. Note that + // in particular it does *not* include attributes that had + // computed values at input time; those appear *only* in + // "cfg" here. + rc, err := config.NewRawConfig(input) + if err != nil { + return nil, err + } + + merged := cfg.raw.Merge(rc) + cfg = NewResourceConfig(merged) + } + + // Get the parent configuration if there is one + if parent := ctx.ParentProviderConfig(n.Provider); parent != nil { + merged := cfg.raw.Merge(parent.raw) + cfg = NewResourceConfig(merged) + } + + *n.Output = cfg + return nil, nil +} + +// EvalConfigProvider is an EvalNode implementation that configures +// a provider that is already initialized and retrieved. +type EvalConfigProvider struct { + Provider string + Config **ResourceConfig +} + +func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { + return nil, ctx.ConfigureProvider(n.Provider, *n.Config) +} + +// EvalInitProvider is an EvalNode implementation that initializes a provider +// and returns nothing. The provider can be retrieved again with the +// EvalGetProvider node. +type EvalInitProvider struct { + Name string +} + +func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { + return ctx.InitProvider(n.Name) +} + +// EvalCloseProvider is an EvalNode implementation that closes provider +// connections that aren't needed anymore. +type EvalCloseProvider struct { + Name string +} + +func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) { + ctx.CloseProvider(n.Name) + return nil, nil +} + +// EvalGetProvider is an EvalNode implementation that retrieves an already +// initialized provider instance for the given name. 
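+// It returns an error if the provider was not initialized earlier in the
+// walk via EvalInitProvider.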
+type EvalGetProvider struct { + Name string + Output *ResourceProvider +} + +func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) { + result := ctx.Provider(n.Name) + if result == nil { + return nil, fmt.Errorf("provider %s not initialized", n.Name) + } + + if n.Output != nil { + *n.Output = result + } + + return nil, nil +} + +// EvalInputProvider is an EvalNode implementation that asks for input +// for the given provider configurations. +type EvalInputProvider struct { + Name string + Provider *ResourceProvider + Config **ResourceConfig +} + +func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) { + // If we already configured this provider, then don't do this again + if v := ctx.ProviderInput(n.Name); v != nil { + return nil, nil + } + + rc := *n.Config + + // Wrap the input into a namespace + input := &PrefixUIInput{ + IdPrefix: fmt.Sprintf("provider.%s", n.Name), + QueryPrefix: fmt.Sprintf("provider.%s.", n.Name), + UIInput: ctx.Input(), + } + + // Go through each provider and capture the input necessary + // to satisfy it. + config, err := (*n.Provider).Input(input, rc) + if err != nil { + return nil, fmt.Errorf( + "Error configuring %s: %s", n.Name, err) + } + + // Set the input that we received so that child modules don't attempt + // to ask for input again. + if config != nil && len(config.Config) > 0 { + // This repository of provider input results on the context doesn't + // retain config.ComputedKeys, so we need to filter those out here + // in order that later users of this data won't try to use the unknown + // value placeholder as if it were a literal value. This map is just + // of known values we've been able to complete so far; dynamic stuff + // will be merged in by EvalBuildProviderConfig on subsequent + // (post-input) walks. + confMap := config.Config + if config.ComputedKeys != nil { + for _, key := range config.ComputedKeys { + delete(confMap, key) + } + } + + ctx.SetProviderInput(n.Name, confMap) + } else { + ctx.SetProviderInput(n.Name, map[string]interface{}{}) + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go new file mode 100644 index 00000000..89579c05 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go @@ -0,0 +1,47 @@ +package terraform + +import ( + "fmt" +) + +// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner +// and returns nothing. The provisioner can be retrieved again with the +// EvalGetProvisioner node. +type EvalInitProvisioner struct { + Name string +} + +func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) { + return ctx.InitProvisioner(n.Name) +} + +// EvalCloseProvisioner is an EvalNode implementation that closes provisioner +// connections that aren't needed anymore. +type EvalCloseProvisioner struct { + Name string +} + +func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) { + ctx.CloseProvisioner(n.Name) + return nil, nil +} + +// EvalGetProvisioner is an EvalNode implementation that retrieves an already +// initialized provisioner instance for the given name. 
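+// As with EvalGetProvider, the provisioner must have been set up by a
+// prior EvalInitProvisioner or an error is returned.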
+type EvalGetProvisioner struct { + Name string + Output *ResourceProvisioner +} + +func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) { + result := ctx.Provisioner(n.Name) + if result == nil { + return nil, fmt.Errorf("provisioner %s not initialized", n.Name) + } + + if n.Output != nil { + *n.Output = result + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go new file mode 100644 index 00000000..fb85a284 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go @@ -0,0 +1,139 @@ +package terraform + +import ( + "fmt" +) + +// EvalReadDataDiff is an EvalNode implementation that executes a data +// resource's ReadDataDiff method to discover what attributes it exports. +type EvalReadDataDiff struct { + Provider *ResourceProvider + Output **InstanceDiff + OutputState **InstanceState + Config **ResourceConfig + Info *InstanceInfo + + // Set Previous when re-evaluating diff during apply, to ensure that + // the "Destroy" flag is preserved. + Previous **InstanceDiff +} + +func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) { + // TODO: test + + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(n.Info, nil) + }) + if err != nil { + return nil, err + } + + var diff *InstanceDiff + + if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() { + // If we're re-diffing for a diff that was already planning to + // destroy, then we'll just continue with that plan. + diff = &InstanceDiff{Destroy: true} + } else { + provider := *n.Provider + config := *n.Config + + var err error + diff, err = provider.ReadDataDiff(n.Info, config) + if err != nil { + return nil, err + } + if diff == nil { + diff = new(InstanceDiff) + } + + // if id isn't explicitly set then it's always computed, because we're + // always "creating a new resource". + diff.init() + if _, ok := diff.Attributes["id"]; !ok { + diff.SetAttribute("id", &ResourceAttrDiff{ + Old: "", + NewComputed: true, + RequiresNew: true, + Type: DiffAttrOutput, + }) + } + } + + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(n.Info, diff) + }) + if err != nil { + return nil, err + } + + *n.Output = diff + + if n.OutputState != nil { + state := &InstanceState{} + *n.OutputState = state + + // Apply the diff to the returned state, so the state includes + // any attribute values that are not computed. + if !diff.Empty() && n.OutputState != nil { + *n.OutputState = state.MergeDiff(diff) + } + } + + return nil, nil +} + +// EvalReadDataApply is an EvalNode implementation that executes a data +// resource's ReadDataApply method to read data from the data source. +type EvalReadDataApply struct { + Provider *ResourceProvider + Output **InstanceState + Diff **InstanceDiff + Info *InstanceInfo +} + +func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { + // TODO: test + provider := *n.Provider + diff := *n.Diff + + // If the diff is for *destroying* this resource then we'll + // just drop its state and move on, since data resources don't + // support an actual "destroy" action. + if diff != nil && diff.GetDestroy() { + if n.Output != nil { + *n.Output = nil + } + return nil, nil + } + + // For the purpose of external hooks we present a data apply as a + // "Refresh" rather than an "Apply" because creating a data source + // is presented to users/callers as a "read" operation. 
+ err := ctx.Hook(func(h Hook) (HookAction, error) { + // We don't have a state yet, so we'll just give the hook an + // empty one to work with. + return h.PreRefresh(n.Info, &InstanceState{}) + }) + if err != nil { + return nil, err + } + + state, err := provider.ReadDataApply(n.Info, diff) + if err != nil { + return nil, fmt.Errorf("%s: %s", n.Info.Id, err) + } + + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostRefresh(n.Info, state) + }) + if err != nil { + return nil, err + } + + if n.Output != nil { + *n.Output = state + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go new file mode 100644 index 00000000..fa2b8126 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go @@ -0,0 +1,55 @@ +package terraform + +import ( + "fmt" + "log" +) + +// EvalRefresh is an EvalNode implementation that does a refresh for +// a resource. +type EvalRefresh struct { + Provider *ResourceProvider + State **InstanceState + Info *InstanceInfo + Output **InstanceState +} + +// TODO: test +func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) { + provider := *n.Provider + state := *n.State + + // If we have no state, we don't do any refreshing + if state == nil { + log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id) + return nil, nil + } + + // Call pre-refresh hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreRefresh(n.Info, state) + }) + if err != nil { + return nil, err + } + + // Refresh! + state, err = provider.Refresh(n.Info, state) + if err != nil { + return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error()) + } + + // Call post-refresh hook + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostRefresh(n.Info, state) + }) + if err != nil { + return nil, err + } + + if n.Output != nil { + *n.Output = state + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go new file mode 100644 index 00000000..5eca6782 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go @@ -0,0 +1,13 @@ +package terraform + +// EvalInstanceInfo is an EvalNode implementation that fills in the +// InstanceInfo as much as it can. +type EvalInstanceInfo struct { + Info *InstanceInfo +} + +// TODO: test +func (n *EvalInstanceInfo) Eval(ctx EvalContext) (interface{}, error) { + n.Info.ModulePath = ctx.Path() + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go new file mode 100644 index 00000000..82d81782 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go @@ -0,0 +1,27 @@ +package terraform + +// EvalSequence is an EvalNode that evaluates in sequence. +type EvalSequence struct { + Nodes []EvalNode +} + +func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) { + for _, n := range n.Nodes { + if n == nil { + continue + } + + if _, err := EvalRaw(n, ctx); err != nil { + return nil, err + } + } + + return nil, nil +} + +// EvalNodeFilterable impl. 
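+//
+// Filter applies fn to every child node in place, letting EvalFilter
+// rewrite an entire sequence at once. For example:
+//
+//	// given some walkOperation op:
+//	filtered := EvalFilter(seq, EvalNodeFilterOp(op))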
+func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) { + for i, node := range n.Nodes { + n.Nodes[i] = fn(node) + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go new file mode 100644 index 00000000..126a0e63 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go @@ -0,0 +1,324 @@ +package terraform + +import "fmt" + +// EvalReadState is an EvalNode implementation that reads the +// primary InstanceState for a specific resource out of the state. +type EvalReadState struct { + Name string + Output **InstanceState +} + +func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) { + return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) { + return rs.Primary, nil + }) +} + +// EvalReadStateDeposed is an EvalNode implementation that reads the +// deposed InstanceState for a specific resource out of the state +type EvalReadStateDeposed struct { + Name string + Output **InstanceState + // Index indicates which instance in the Deposed list to target, or -1 for + // the last item. + Index int +} + +func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) { + return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) { + // Get the index. If it is negative, then we get the last one + idx := n.Index + if idx < 0 { + idx = len(rs.Deposed) - 1 + } + if idx >= 0 && idx < len(rs.Deposed) { + return rs.Deposed[idx], nil + } else { + return nil, fmt.Errorf("bad deposed index: %d, for resource: %#v", idx, rs) + } + }) +} + +// Does the bulk of the work for the various flavors of ReadState eval nodes. +// Each node just provides a reader function to get from the ResourceState to the +// InstanceState, and this takes care of all the plumbing. +func readInstanceFromState( + ctx EvalContext, + resourceName string, + output **InstanceState, + readerFn func(*ResourceState) (*InstanceState, error), +) (*InstanceState, error) { + state, lock := ctx.State() + + // Get a read lock so we can access this instance + lock.RLock() + defer lock.RUnlock() + + // Look for the module state. If we don't have one, then it doesn't matter. + mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + return nil, nil + } + + // Look for the resource state. If we don't have one, then it is okay. + rs := mod.Resources[resourceName] + if rs == nil { + return nil, nil + } + + // Use the delegate function to get the instance state from the resource state + is, err := readerFn(rs) + if err != nil { + return nil, err + } + + // Write the result to the output pointer + if output != nil { + *output = is + } + + return is, nil +} + +// EvalRequireState is an EvalNode implementation that early exits +// if the state doesn't have an ID. +type EvalRequireState struct { + State **InstanceState +} + +func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) { + if n.State == nil { + return nil, EvalEarlyExitError{} + } + + state := *n.State + if state == nil || state.ID == "" { + return nil, EvalEarlyExitError{} + } + + return nil, nil +} + +// EvalUpdateStateHook is an EvalNode implementation that calls the +// PostStateUpdate hook with the current state. +type EvalUpdateStateHook struct{} + +func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) { + state, lock := ctx.State() + + // Get a full lock. 
Even calling something like WriteState can modify + // (prune) the state, so we need the full lock. + lock.Lock() + defer lock.Unlock() + + // Call the hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostStateUpdate(state) + }) + if err != nil { + return nil, err + } + + return nil, nil +} + +// EvalWriteState is an EvalNode implementation that writes the +// primary InstanceState for a specific resource into the state. +type EvalWriteState struct { + Name string + ResourceType string + Provider string + Dependencies []string + State **InstanceState +} + +func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) { + return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies, + func(rs *ResourceState) error { + rs.Primary = *n.State + return nil + }, + ) +} + +// EvalWriteStateDeposed is an EvalNode implementation that writes +// an InstanceState out to the Deposed list of a resource in the state. +type EvalWriteStateDeposed struct { + Name string + ResourceType string + Provider string + Dependencies []string + State **InstanceState + // Index indicates which instance in the Deposed list to target, or -1 to append. + Index int +} + +func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) { + return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies, + func(rs *ResourceState) error { + if n.Index == -1 { + rs.Deposed = append(rs.Deposed, *n.State) + } else { + rs.Deposed[n.Index] = *n.State + } + return nil + }, + ) +} + +// Pulls together the common tasks of the EvalWriteState nodes. All the args +// are passed directly down from the EvalNode along with a `writer` function +// which is yielded the *ResourceState and is responsible for writing an +// InstanceState to the proper field in the ResourceState. +func writeInstanceToState( + ctx EvalContext, + resourceName string, + resourceType string, + provider string, + dependencies []string, + writerFn func(*ResourceState) error, +) (*InstanceState, error) { + state, lock := ctx.State() + if state == nil { + return nil, fmt.Errorf("cannot write state to nil state") + } + + // Get a write lock so we can access this instance + lock.Lock() + defer lock.Unlock() + + // Look for the module state. If we don't have one, create it. + mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + mod = state.AddModule(ctx.Path()) + } + + // Look for the resource state. + rs := mod.Resources[resourceName] + if rs == nil { + rs = &ResourceState{} + rs.init() + mod.Resources[resourceName] = rs + } + rs.Type = resourceType + rs.Dependencies = dependencies + rs.Provider = provider + + if err := writerFn(rs); err != nil { + return nil, err + } + + return nil, nil +} + +// EvalClearPrimaryState is an EvalNode implementation that clears the primary +// instance from a resource state. +type EvalClearPrimaryState struct { + Name string +} + +func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) { + state, lock := ctx.State() + + // Get a read lock so we can access this instance + lock.RLock() + defer lock.RUnlock() + + // Look for the module state. If we don't have one, then it doesn't matter. + mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + return nil, nil + } + + // Look for the resource state. If we don't have one, then it is okay. 
+	rs := mod.Resources[n.Name]
+	if rs == nil {
+		return nil, nil
+	}
+
+	// Clear primary from the resource state
+	rs.Primary = nil
+
+	return nil, nil
+}
+
+// EvalDeposeState is an EvalNode implementation that takes the primary
+// out of a state and makes it Deposed. This is done at the beginning of
+// create-before-destroy calls so that the create can create while preserving
+// the old state of the to-be-destroyed resource.
+type EvalDeposeState struct {
+	Name string
+}
+
+// TODO: test
+func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
+	state, lock := ctx.State()
+
+	// Get a read lock so we can access this instance
+	lock.RLock()
+	defer lock.RUnlock()
+
+	// Look for the module state. If we don't have one, then it doesn't matter.
+	mod := state.ModuleByPath(ctx.Path())
+	if mod == nil {
+		return nil, nil
+	}
+
+	// Look for the resource state. If we don't have one, then it is okay.
+	rs := mod.Resources[n.Name]
+	if rs == nil {
+		return nil, nil
+	}
+
+	// If we don't have a primary, we have nothing to depose
+	if rs.Primary == nil {
+		return nil, nil
+	}
+
+	// Depose
+	rs.Deposed = append(rs.Deposed, rs.Primary)
+	rs.Primary = nil
+
+	return nil, nil
+}
+
+// EvalUndeposeState is an EvalNode implementation that restores the last
+// deposed InstanceState of a specific resource as its primary, writing
+// the given state into the vacated deposed slot.
+type EvalUndeposeState struct {
+	Name  string
+	State **InstanceState
+}
+
+// TODO: test
+func (n *EvalUndeposeState) Eval(ctx EvalContext) (interface{}, error) {
+	state, lock := ctx.State()
+
+	// Get a read lock so we can access this instance
+	lock.RLock()
+	defer lock.RUnlock()
+
+	// Look for the module state. If we don't have one, then it doesn't matter.
+	mod := state.ModuleByPath(ctx.Path())
+	if mod == nil {
+		return nil, nil
+	}
+
+	// Look for the resource state. If we don't have one, then it is okay.
+	rs := mod.Resources[n.Name]
+	if rs == nil {
+		return nil, nil
+	}
+
+	// If we don't have any deposed resources, then we don't have anything to do
+	if len(rs.Deposed) == 0 {
+		return nil, nil
+	}
+
+	// Undepose
+	idx := len(rs.Deposed) - 1
+	rs.Primary = rs.Deposed[idx]
+	rs.Deposed[idx] = *n.State
+
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
new file mode 100644
index 00000000..478aa640
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
@@ -0,0 +1,227 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/config"
+	"github.com/mitchellh/mapstructure"
+)
+
+// EvalValidateError is the error structure returned if there were
+// validation errors.
+type EvalValidateError struct {
+	Warnings []string
+	Errors   []error
+}
+
+func (e *EvalValidateError) Error() string {
+	return fmt.Sprintf("Warnings: %s. Errors: %s", e.Warnings, e.Errors)
+}
+
+// EvalValidateCount is an EvalNode implementation that validates
+// the count of a resource.
+type EvalValidateCount struct {
+	Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) {
+	var count int
+	var errs []error
+	var err error
+	if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil {
+		errs = append(errs, fmt.Errorf(
+			"Failed to interpolate count: %s", err))
+		goto RETURN
+	}
+
+	count, err = n.Resource.Count()
+	if err != nil {
+		// If we can't get the count during validation, then
+		// just replace it with the number 1.
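+		// The count expression may reference values that are still
+		// unknown while validating, so assume a single instance here;
+		// the real count is resolved later, during the plan walk.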
+ c := n.Resource.RawCount.Config()
+ c[n.Resource.RawCount.Key] = "1"
+ count = 1
+ }
+ err = nil
+
+ if count < 0 {
+ errs = append(errs, fmt.Errorf(
+ "Count is less than zero: %d", count))
+ }
+
+RETURN:
+ if len(errs) != 0 {
+ err = &EvalValidateError{
+ Errors: errs,
+ }
+ }
+ return nil, err
+}
+
+// EvalValidateProvider is an EvalNode implementation that validates
+// the configuration of a provider.
+type EvalValidateProvider struct {
+ Provider *ResourceProvider
+ Config **ResourceConfig
+}
+
+func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
+ provider := *n.Provider
+ config := *n.Config
+
+ warns, errs := provider.Validate(config)
+ if len(warns) == 0 && len(errs) == 0 {
+ return nil, nil
+ }
+
+ return nil, &EvalValidateError{
+ Warnings: warns,
+ Errors: errs,
+ }
+}
+
+// EvalValidateProvisioner is an EvalNode implementation that validates
+// the configuration of a provisioner and its connection configuration.
+type EvalValidateProvisioner struct {
+ Provisioner *ResourceProvisioner
+ Config **ResourceConfig
+ ConnConfig **ResourceConfig
+}
+
+func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+ provisioner := *n.Provisioner
+ config := *n.Config
+ var warns []string
+ var errs []error
+
+ {
+ // Validate the provisioner's own config first
+ w, e := provisioner.Validate(config)
+ warns = append(warns, w...)
+ errs = append(errs, e...)
+ }
+
+ {
+ // Now validate the connection config, which might either be from
+ // the provisioner block itself or inherited from the resource's
+ // shared connection info.
+ w, e := n.validateConnConfig(*n.ConnConfig)
+ warns = append(warns, w...)
+ errs = append(errs, e...)
+ }
+
+ if len(warns) == 0 && len(errs) == 0 {
+ return nil, nil
+ }
+
+ return nil, &EvalValidateError{
+ Warnings: warns,
+ Errors: errs,
+ }
+}
+
+func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) {
+ // We can't comprehensively validate the connection config since its
+ // final structure is decided by the communicator and we can't instantiate
+ // that until we have a complete instance state. However, we *can* catch
+ // configuration keys that are not valid for *any* communicator, catching
+ // typos early rather than waiting until we actually try to run one of
+ // the resource's provisioners.
+
+ type connConfigSuperset struct {
+ // All attribute types are interface{} here because at this point we
+ // may still have unresolved interpolation expressions, which will
+ // appear as strings regardless of the final goal type.
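+ // For example, a "port" value that is not yet resolved reaches this
+ // point as a string placeholder rather than a number, so interface{}
+ // is the only safe field type here.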
+ + Type interface{} `mapstructure:"type"` + User interface{} `mapstructure:"user"` + Password interface{} `mapstructure:"password"` + Host interface{} `mapstructure:"host"` + Port interface{} `mapstructure:"port"` + Timeout interface{} `mapstructure:"timeout"` + ScriptPath interface{} `mapstructure:"script_path"` + + // For type=ssh only (enforced in ssh communicator) + PrivateKey interface{} `mapstructure:"private_key"` + Agent interface{} `mapstructure:"agent"` + BastionHost interface{} `mapstructure:"bastion_host"` + BastionPort interface{} `mapstructure:"bastion_port"` + BastionUser interface{} `mapstructure:"bastion_user"` + BastionPassword interface{} `mapstructure:"bastion_password"` + BastionPrivateKey interface{} `mapstructure:"bastion_private_key"` + + // For type=winrm only (enforced in winrm communicator) + HTTPS interface{} `mapstructure:"https"` + Insecure interface{} `mapstructure:"insecure"` + CACert interface{} `mapstructure:"cacert"` + } + + var metadata mapstructure.Metadata + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Metadata: &metadata, + Result: &connConfigSuperset{}, // result is disregarded; we only care about unused keys + }) + if err != nil { + // should never happen + errs = append(errs, err) + return + } + + if err := decoder.Decode(connConfig.Config); err != nil { + errs = append(errs, err) + return + } + + for _, attrName := range metadata.Unused { + errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName)) + } + return +} + +// EvalValidateResource is an EvalNode implementation that validates +// the configuration of a resource. +type EvalValidateResource struct { + Provider *ResourceProvider + Config **ResourceConfig + ResourceName string + ResourceType string + ResourceMode config.ResourceMode + + // IgnoreWarnings means that warnings will not be passed through. This allows + // "just-in-time" passes of validation to continue execution through warnings. + IgnoreWarnings bool +} + +func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) { + provider := *n.Provider + cfg := *n.Config + var warns []string + var errs []error + // Provider entry point varies depending on resource mode, because + // managed resources and data resources are two distinct concepts + // in the provider abstraction. + switch n.ResourceMode { + case config.ManagedResourceMode: + warns, errs = provider.ValidateResource(n.ResourceType, cfg) + case config.DataResourceMode: + warns, errs = provider.ValidateDataSource(n.ResourceType, cfg) + } + + // If the resource name doesn't match the name regular + // expression, show an error. 
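+ // For example, "web_server-01" is accepted, while names containing
+ // spaces or dots (such as "web server" or "db.primary") are not.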
+ if !config.NameRegexp.Match([]byte(n.ResourceName)) { + errs = append(errs, fmt.Errorf( + "%s: resource name can only contain letters, numbers, "+ + "dashes, and underscores.", n.ResourceName)) + } + + if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 { + return nil, nil + } + + return nil, &EvalValidateError{ + Warnings: warns, + Errors: errs, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go new file mode 100644 index 00000000..e39a33c2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go @@ -0,0 +1,279 @@ +package terraform + +import ( + "fmt" + "log" + "reflect" + "strconv" + "strings" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/helper/hilmapstructure" +) + +// EvalTypeCheckVariable is an EvalNode which ensures that the variable +// values which are assigned as inputs to a module (including the root) +// match the types which are either declared for the variables explicitly +// or inferred from the default values. +// +// In order to achieve this three things are required: +// - a map of the proposed variable values +// - the configuration tree of the module in which the variable is +// declared +// - the path to the module (so we know which part of the tree to +// compare the values against). +type EvalTypeCheckVariable struct { + Variables map[string]interface{} + ModulePath []string + ModuleTree *module.Tree +} + +func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) { + currentTree := n.ModuleTree + for _, pathComponent := range n.ModulePath[1:] { + currentTree = currentTree.Children()[pathComponent] + } + targetConfig := currentTree.Config() + + prototypes := make(map[string]config.VariableType) + for _, variable := range targetConfig.Variables { + prototypes[variable.Name] = variable.Type() + } + + // Only display a module in an error message if we are not in the root module + modulePathDescription := fmt.Sprintf(" in module %s", strings.Join(n.ModulePath[1:], ".")) + if len(n.ModulePath) == 1 { + modulePathDescription = "" + } + + for name, declaredType := range prototypes { + proposedValue, ok := n.Variables[name] + if !ok { + // This means the default value should be used as no overriding value + // has been set. Therefore we should continue as no check is necessary. 
+ continue
+ }
+
+ if proposedValue == config.UnknownVariableValue {
+ continue
+ }
+
+ switch declaredType {
+ case config.VariableTypeString:
+ switch proposedValue.(type) {
+ case string:
+ continue
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+ name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+ }
+ case config.VariableTypeMap:
+ switch proposedValue.(type) {
+ case map[string]interface{}:
+ continue
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+ name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+ }
+ case config.VariableTypeList:
+ switch proposedValue.(type) {
+ case []interface{}:
+ continue
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+ name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+ }
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got type string",
+ name, modulePathDescription, declaredType.Printable())
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalSetVariables is an EvalNode implementation that sets the variables
+// explicitly for interpolation later.
+type EvalSetVariables struct {
+ Module *string
+ Variables map[string]interface{}
+}
+
+// TODO: test
+func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) {
+ ctx.SetVariables(*n.Module, n.Variables)
+ return nil, nil
+}
+
+// EvalVariableBlock is an EvalNode implementation that evaluates the
+// given configuration, and uses the final values as a way to set the
+// mapping.
+type EvalVariableBlock struct {
+ Config **ResourceConfig
+ VariableValues map[string]interface{}
+}
+
+func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) {
+ // Clear out the existing mapping
+ for k := range n.VariableValues {
+ delete(n.VariableValues, k)
+ }
+
+ // Get our configuration
+ rc := *n.Config
+ for k, v := range rc.Config {
+ vKind := reflect.ValueOf(v).Type().Kind()
+
+ switch vKind {
+ case reflect.Slice:
+ var vSlice []interface{}
+ if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil {
+ n.VariableValues[k] = vSlice
+ continue
+ }
+ case reflect.Map:
+ var vMap map[string]interface{}
+ if err := hilmapstructure.WeakDecode(v, &vMap); err == nil {
+ n.VariableValues[k] = vMap
+ continue
+ }
+ default:
+ var vString string
+ if err := hilmapstructure.WeakDecode(v, &vString); err == nil {
+ n.VariableValues[k] = vString
+ continue
+ }
+ }
+
+ return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k)
+ }
+
+ for _, path := range rc.ComputedKeys {
+ log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path)
+ err := n.setUnknownVariableValueForPath(path)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error {
+ pathComponents := strings.Split(path, ".")
+
+ if len(pathComponents) < 1 {
+ return fmt.Errorf("No path components in %s", path)
+ }
+
+ if len(pathComponents) == 1 {
+ // Special case the "top level" since we know the type
+ if _, ok := n.VariableValues[pathComponents[0]]; !ok {
+ n.VariableValues[pathComponents[0]] = config.UnknownVariableValue
+ }
+ return nil
+ }
+
+ // Otherwise find the correct point in the tree and then set to unknown
+ var current interface{} = n.VariableValues[pathComponents[0]]
+ for i := 1; i < len(pathComponents); i++ {
+ switch tCurrent := current.(type) {
+ case []interface{}:
+ index, err :=
strconv.Atoi(pathComponents[i]) + if err != nil { + return fmt.Errorf("Cannot convert %s to slice index in path %s", + pathComponents[i], path) + } + current = tCurrent[index] + case []map[string]interface{}: + index, err := strconv.Atoi(pathComponents[i]) + if err != nil { + return fmt.Errorf("Cannot convert %s to slice index in path %s", + pathComponents[i], path) + } + current = tCurrent[index] + case map[string]interface{}: + if val, hasVal := tCurrent[pathComponents[i]]; hasVal { + current = val + continue + } + + tCurrent[pathComponents[i]] = config.UnknownVariableValue + break + } + } + + return nil +} + +// EvalCoerceMapVariable is an EvalNode implementation that recognizes a +// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a +// bare map literal is indistinguishable from a list of maps w/ one element. +// +// We take all the same inputs as EvalTypeCheckVariable above, since we need +// both the target type and the proposed value in order to properly coerce. +type EvalCoerceMapVariable struct { + Variables map[string]interface{} + ModulePath []string + ModuleTree *module.Tree +} + +// Eval implements the EvalNode interface. See EvalCoerceMapVariable for +// details. +func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) { + currentTree := n.ModuleTree + for _, pathComponent := range n.ModulePath[1:] { + currentTree = currentTree.Children()[pathComponent] + } + targetConfig := currentTree.Config() + + prototypes := make(map[string]config.VariableType) + for _, variable := range targetConfig.Variables { + prototypes[variable.Name] = variable.Type() + } + + for name, declaredType := range prototypes { + if declaredType != config.VariableTypeMap { + continue + } + + proposedValue, ok := n.Variables[name] + if !ok { + continue + } + + if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 { + if m, ok := list[0].(map[string]interface{}); ok { + log.Printf("[DEBUG] EvalCoerceMapVariable: "+ + "Coercing single element list into map: %#v", m) + n.Variables[name] = m + } + } + } + + return nil, nil +} + +// hclTypeName returns the name of the type that would represent this value in +// a config file, or falls back to the Go type name if there's no corresponding +// HCL type. This is used for formatted output, not for comparing types. +func hclTypeName(i interface{}) string { + switch k := reflect.Indirect(reflect.ValueOf(i)).Kind(); k { + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: + return "number" + case reflect.Array, reflect.Slice: + return "list" + case reflect.Map: + return "map" + case reflect.String: + return "string" + default: + // fall back to the Go type if there's no match + return k.String() + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go new file mode 100644 index 00000000..00392efe --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go @@ -0,0 +1,119 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config" +) + +// ProviderEvalTree returns the evaluation tree for initializing and +// configuring providers. 
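+//
+// A rough usage sketch (the provider name and config here are only
+// illustrative):
+//
+//	node := ProviderEvalTree("aws", rawConfig)
+//	out, err := Eval(node, ctx) // only the EvalOpFilter branches whose
+//	                            // Ops match the current walk will run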
+func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { + var provider ResourceProvider + var resourceConfig *ResourceConfig + + seq := make([]EvalNode, 0, 5) + seq = append(seq, &EvalInitProvider{Name: n}) + + // Input stuff + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkInput, walkImport}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Name: n, + Output: &provider, + }, + &EvalInterpolate{ + Config: config, + Output: &resourceConfig, + }, + &EvalBuildProviderConfig{ + Provider: n, + Config: &resourceConfig, + Output: &resourceConfig, + }, + &EvalInputProvider{ + Name: n, + Provider: &provider, + Config: &resourceConfig, + }, + }, + }, + }) + + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkValidate}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Name: n, + Output: &provider, + }, + &EvalInterpolate{ + Config: config, + Output: &resourceConfig, + }, + &EvalBuildProviderConfig{ + Provider: n, + Config: &resourceConfig, + Output: &resourceConfig, + }, + &EvalValidateProvider{ + Provider: &provider, + Config: &resourceConfig, + }, + &EvalSetProviderConfig{ + Provider: n, + Config: &resourceConfig, + }, + }, + }, + }) + + // Apply stuff + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Name: n, + Output: &provider, + }, + &EvalInterpolate{ + Config: config, + Output: &resourceConfig, + }, + &EvalBuildProviderConfig{ + Provider: n, + Config: &resourceConfig, + Output: &resourceConfig, + }, + &EvalSetProviderConfig{ + Provider: n, + Config: &resourceConfig, + }, + }, + }, + }) + + // We configure on everything but validate, since validate may + // not have access to all the variables. + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalConfigProvider{ + Provider: n, + Config: &resourceConfig, + }, + }, + }, + }) + + return &EvalSequence{Nodes: seq} +} + +// CloseProviderEvalTree returns the evaluation tree for closing +// provider connections that aren't needed anymore. +func CloseProviderEvalTree(n string) EvalNode { + return &EvalCloseProvider{Name: n} +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go new file mode 100644 index 00000000..48ce6a33 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go @@ -0,0 +1,172 @@ +package terraform + +import ( + "fmt" + "log" + "runtime/debug" + "strings" + + "github.com/hashicorp/terraform/dag" +) + +// RootModuleName is the name given to the root module implicitly. +const RootModuleName = "root" + +// RootModulePath is the path for the root module. +var RootModulePath = []string{RootModuleName} + +// Graph represents the graph that Terraform uses to represent resources +// and their dependencies. +type Graph struct { + // Graph is the actual DAG. This is embedded so you can call the DAG + // methods directly. + dag.AcyclicGraph + + // Path is the path in the module tree that this Graph represents. + // The root is represented by a single element list containing + // RootModuleName + Path []string + + // debugName is a name for reference in the debug output. This is usually + // to indicate what topmost builder was, and if this graph is a shadow or + // not. 
+ debugName string
+}
+
+func (g *Graph) DirectedGraph() dag.Grapher {
+ return &g.AcyclicGraph
+}
+
+// Walk walks the graph with the given walker for callbacks. The graph
+// will be walked with full parallelism, so the walker should expect
+// to be called concurrently.
+func (g *Graph) Walk(walker GraphWalker) error {
+ return g.walk(walker)
+}
+
+func (g *Graph) walk(walker GraphWalker) error {
+ // The callbacks for entering/exiting a graph
+ ctx := walker.EnterPath(g.Path)
+ defer walker.ExitPath(g.Path)
+
+ // Get the path for logs
+ path := strings.Join(ctx.Path(), ".")
+
+ // Determine if our walker is a panic wrapper
+ panicwrap, ok := walker.(GraphWalkerPanicwrapper)
+ if !ok {
+ panicwrap = nil // just to be sure
+ }
+
+ debugName := "walk-graph.json"
+ if g.debugName != "" {
+ debugName = g.debugName + "-" + debugName
+ }
+
+ debugBuf := dbug.NewFileWriter(debugName)
+ g.SetDebugWriter(debugBuf)
+ defer debugBuf.Close()
+
+ // Walk the graph.
+ var walkFn dag.WalkFunc
+ walkFn = func(v dag.Vertex) (rerr error) {
+ log.Printf("[DEBUG] vertex '%s.%s': walking", path, dag.VertexName(v))
+ g.DebugVisitInfo(v, g.debugName)
+
+ // If we have a panicwrap GraphWalker and a panic occurs, recover
+ // and call that. We ensure the return value is an error, however,
+ // so that future nodes are not called.
+ defer func() {
+ // If no panicwrap, do nothing
+ if panicwrap == nil {
+ return
+ }
+
+ // If no panic, do nothing
+ err := recover()
+ if err == nil {
+ return
+ }
+
+ // Modify the return value to show the error
+ rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
+ dag.VertexName(v), err, debug.Stack())
+
+ // Call the panic wrapper
+ panicwrap.Panic(v, err)
+ }()
+
+ walker.EnterVertex(v)
+ defer walker.ExitVertex(v, rerr)
+
+ // vertexCtx is the context that we use when evaluating. This
+ // is normally the context of our graph but can be overridden
+ // with a GraphNodeSubPath impl.
+ vertexCtx := ctx
+ if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 {
+ vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path()))
+ defer walker.ExitPath(pn.Path())
+ }
+
+ // If the node is eval-able, then evaluate it.
+ if ev, ok := v.(GraphNodeEvalable); ok {
+ tree := ev.EvalTree()
+ if tree == nil {
+ panic(fmt.Sprintf(
+ "%s.%s (%T): nil eval tree", path, dag.VertexName(v), v))
+ }
+
+ // Allow the walker to change our tree if needed. Eval,
+ // then callback with the output.
+ log.Printf("[DEBUG] vertex '%s.%s': evaluating", path, dag.VertexName(v)) + + g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) + + tree = walker.EnterEvalTree(v, tree) + output, err := Eval(tree, vertexCtx) + if rerr = walker.ExitEvalTree(v, output, err); rerr != nil { + return + } + } + + // If the node is dynamically expanded, then expand it + if ev, ok := v.(GraphNodeDynamicExpandable); ok { + log.Printf( + "[DEBUG] vertex '%s.%s': expanding/walking dynamic subgraph", + path, + dag.VertexName(v)) + + g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path)) + + g, err := ev.DynamicExpand(vertexCtx) + if err != nil { + rerr = err + return + } + if g != nil { + // Walk the subgraph + if rerr = g.walk(walker); rerr != nil { + return + } + } + } + + // If the node has a subgraph, then walk the subgraph + if sn, ok := v.(GraphNodeSubgraph); ok { + log.Printf( + "[DEBUG] vertex '%s.%s': walking subgraph", + path, + dag.VertexName(v)) + + g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path)) + + if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil { + return + } + } + + return nil + } + + return g.AcyclicGraph.Walk(walkFn) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go new file mode 100644 index 00000000..6374bb90 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go @@ -0,0 +1,77 @@ +package terraform + +import ( + "fmt" + "log" + "strings" +) + +// GraphBuilder is an interface that can be implemented and used with +// Terraform to build the graph that Terraform walks. +type GraphBuilder interface { + // Build builds the graph for the given module path. It is up to + // the interface implementation whether this build should expand + // the graph or not. + Build(path []string) (*Graph, error) +} + +// BasicGraphBuilder is a GraphBuilder that builds a graph out of a +// series of transforms and (optionally) validates the graph is a valid +// structure. +type BasicGraphBuilder struct { + Steps []GraphTransformer + Validate bool + // Optional name to add to the graph debug log + Name string +} + +func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) { + g := &Graph{Path: path} + + debugName := "graph.json" + if b.Name != "" { + debugName = b.Name + "-" + debugName + } + debugBuf := dbug.NewFileWriter(debugName) + g.SetDebugWriter(debugBuf) + defer debugBuf.Close() + + for _, step := range b.Steps { + if step == nil { + continue + } + + stepName := fmt.Sprintf("%T", step) + dot := strings.LastIndex(stepName, ".") + if dot >= 0 { + stepName = stepName[dot+1:] + } + + debugOp := g.DebugOperation(stepName, "") + err := step.Transform(g) + + errMsg := "" + if err != nil { + errMsg = err.Error() + } + debugOp.End(errMsg) + + log.Printf( + "[TRACE] Graph after step %T:\n\n%s", + step, g.StringWithNodeTypes()) + + if err != nil { + return g, err + } + } + + // Validate the graph structure + if b.Validate { + if err := g.Validate(); err != nil { + log.Printf("[ERROR] Graph validation failed. 
Graph:\n\n%s", g.String()) + return nil, err + } + } + + return g, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go new file mode 100644 index 00000000..7070c59e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go @@ -0,0 +1,76 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// ImportGraphBuilder implements GraphBuilder and is responsible for building +// a graph for importing resources into Terraform. This is a much, much +// simpler graph than a normal configuration graph. +type ImportGraphBuilder struct { + // ImportTargets are the list of resources to import. + ImportTargets []*ImportTarget + + // Module is the module to add to the graph. See ImportOpts.Module. + Module *module.Tree + + // Providers is the list of providers supported. + Providers []string +} + +// Build builds the graph according to the steps returned by Steps. +func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: true, + Name: "ImportGraphBuilder", + }).Build(path) +} + +// Steps returns the ordered list of GraphTransformers that must be executed +// to build a complete graph. +func (b *ImportGraphBuilder) Steps() []GraphTransformer { + // Get the module. If we don't have one, we just use an empty tree + // so that the transform still works but does nothing. + mod := b.Module + if mod == nil { + mod = module.NewEmptyTree() + } + + // Custom factory for creating providers. + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + steps := []GraphTransformer{ + // Create all our resources from the configuration and state + &ConfigTransformer{Module: mod}, + + // Add the import steps + &ImportStateTransformer{Targets: b.ImportTargets}, + + // Provider-related transformations + &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider}, + &ProviderTransformer{}, + &DisableProviderTransformer{}, + &ParentProviderTransformer{}, + &AttachProviderConfigTransformer{Module: mod}, + + // This validates that the providers only depend on variables + &ImportProviderValidateTransformer{}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + + // Single root + &RootTransformer{}, + + // Optimize + &TransitiveReductionTransformer{}, + } + + return steps +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go new file mode 100644 index 00000000..73e3821f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go @@ -0,0 +1,9 @@ +package terraform + +import "github.com/hashicorp/terraform/dag" + +// GraphDot returns the dot formatting of a visual representation of +// the given Terraform graph. 
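+//
+// Minimal usage sketch (a zero-value DotOpts is assumed to be valid):
+//
+//	dot, err := GraphDot(g, &dag.DotOpts{})
+//	if err == nil {
+//		fmt.Println(dot)
+//	}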
+func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) {
+ return string(g.Dot(opts)), nil
+}
 diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
new file mode 100644
index 00000000..2897eb54
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
@@ -0,0 +1,7 @@
+package terraform
+
+// GraphNodeSubPath says that a node is part of a graph with a
+// different path, and the context should be adjusted accordingly.
+type GraphNodeSubPath interface {
+ Path() []string
+}
 diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
new file mode 100644
index 00000000..34ce6f64
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
@@ -0,0 +1,60 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphWalker is an interface that can be implemented and used with
+// Graph.Walk to invoke the given callbacks on certain events.
+type GraphWalker interface {
+ EnterPath([]string) EvalContext
+ ExitPath([]string)
+ EnterVertex(dag.Vertex)
+ ExitVertex(dag.Vertex, error)
+ EnterEvalTree(dag.Vertex, EvalNode) EvalNode
+ ExitEvalTree(dag.Vertex, interface{}, error) error
+}
+
+// GraphWalkerPanicwrapper can be optionally implemented to catch panics
+// that occur while walking the graph. This is not generally recommended
+// since panics should crash Terraform and result in a bug report. However,
+// this is particularly useful for situations like the shadow graph where
+// you don't ever want to cause a panic.
+type GraphWalkerPanicwrapper interface {
+ GraphWalker
+
+ // Panic is called when a panic occurs. This will halt the panic from
+ // propagating, so if the walker still wants Terraform to crash, it
+ // should panic again. This is called from within a defer so
+ // runtime/debug.Stack can be used to get the stack trace of the panic.
+ Panic(dag.Vertex, interface{})
+}
+
+// GraphWalkerPanicwrap wraps an existing GraphWalker to catch and swallow
+// the panics. This doesn't lose the panics since the panics are still
+// returned as errors as part of a graph walk.
+func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper {
+ return &graphWalkerPanicwrapper{
+ GraphWalker: w,
+ }
+}
+
+type graphWalkerPanicwrapper struct {
+ GraphWalker
+}
+
+func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {}
+
+// NullGraphWalker is a GraphWalker implementation that does nothing.
+// This can be embedded within other GraphWalker implementations for easily
+// implementing all the required functions.
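+//
+// For example (an illustrative sketch, not part of the upstream API), a
+// walker that only counts visited vertices:
+//
+//	type countingWalker struct {
+//		NullGraphWalker
+//		count int
+//	}
+//
+//	func (w *countingWalker) EnterVertex(dag.Vertex) { w.count++ }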
+type NullGraphWalker struct{} + +func (NullGraphWalker) EnterPath([]string) EvalContext { return new(MockEvalContext) } +func (NullGraphWalker) ExitPath([]string) {} +func (NullGraphWalker) EnterVertex(dag.Vertex) {} +func (NullGraphWalker) ExitVertex(dag.Vertex, error) {} +func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n } +func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error { + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go new file mode 100644 index 00000000..e63b4603 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go @@ -0,0 +1,157 @@ +package terraform + +import ( + "context" + "fmt" + "log" + "sync" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/dag" +) + +// ContextGraphWalker is the GraphWalker implementation used with the +// Context struct to walk and evaluate the graph. +type ContextGraphWalker struct { + NullGraphWalker + + // Configurable values + Context *Context + Operation walkOperation + StopContext context.Context + + // Outputs, do not set these. Do not read these while the graph + // is being walked. + ValidationWarnings []string + ValidationErrors []error + + errorLock sync.Mutex + once sync.Once + contexts map[string]*BuiltinEvalContext + contextLock sync.Mutex + interpolaterVars map[string]map[string]interface{} + interpolaterVarLock sync.Mutex + providerCache map[string]ResourceProvider + providerConfigCache map[string]*ResourceConfig + providerLock sync.Mutex + provisionerCache map[string]ResourceProvisioner + provisionerLock sync.Mutex +} + +func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { + w.once.Do(w.init) + + w.contextLock.Lock() + defer w.contextLock.Unlock() + + // If we already have a context for this path cached, use that + key := PathCacheKey(path) + if ctx, ok := w.contexts[key]; ok { + return ctx + } + + // Setup the variables for this interpolater + variables := make(map[string]interface{}) + if len(path) <= 1 { + for k, v := range w.Context.variables { + variables[k] = v + } + } + w.interpolaterVarLock.Lock() + if m, ok := w.interpolaterVars[key]; ok { + for k, v := range m { + variables[k] = v + } + } + w.interpolaterVars[key] = variables + w.interpolaterVarLock.Unlock() + + ctx := &BuiltinEvalContext{ + StopContext: w.StopContext, + PathValue: path, + Hooks: w.Context.hooks, + InputValue: w.Context.uiInput, + Components: w.Context.components, + ProviderCache: w.providerCache, + ProviderConfigCache: w.providerConfigCache, + ProviderInputConfig: w.Context.providerInputConfig, + ProviderLock: &w.providerLock, + ProvisionerCache: w.provisionerCache, + ProvisionerLock: &w.provisionerLock, + DiffValue: w.Context.diff, + DiffLock: &w.Context.diffLock, + StateValue: w.Context.state, + StateLock: &w.Context.stateLock, + Interpolater: &Interpolater{ + Operation: w.Operation, + Meta: w.Context.meta, + Module: w.Context.module, + State: w.Context.state, + StateLock: &w.Context.stateLock, + VariableValues: variables, + VariableValuesLock: &w.interpolaterVarLock, + }, + InterpolaterVars: w.interpolaterVars, + InterpolaterVarLock: &w.interpolaterVarLock, + } + + w.contexts[key] = ctx + return ctx +} + +func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { + log.Printf("[TRACE] [%s] Entering eval tree: %s", + w.Operation, dag.VertexName(v)) + + // Acquire a lock on the semaphore + 
w.Context.parallelSem.Acquire()
+
+ // We want to filter the evaluation tree to only include operations
+ // that belong to this operation.
+ return EvalFilter(n, EvalNodeFilterOp(w.Operation))
+}
+
+func (w *ContextGraphWalker) ExitEvalTree(
+ v dag.Vertex, output interface{}, err error) error {
+ log.Printf("[TRACE] [%s] Exiting eval tree: %s",
+ w.Operation, dag.VertexName(v))
+
+ // Release the semaphore
+ w.Context.parallelSem.Release()
+
+ if err == nil {
+ return nil
+ }
+
+ // Acquire the lock because anything is going to require a lock.
+ w.errorLock.Lock()
+ defer w.errorLock.Unlock()
+
+ // Try to get a validation error out of it. If it's not a validation
+ // error, then just record the normal error.
+ verr, ok := err.(*EvalValidateError)
+ if !ok {
+ return err
+ }
+
+ for _, msg := range verr.Warnings {
+ w.ValidationWarnings = append(
+ w.ValidationWarnings,
+ fmt.Sprintf("%s: %s", dag.VertexName(v), msg))
+ }
+ for _, e := range verr.Errors {
+ w.ValidationErrors = append(
+ w.ValidationErrors,
+ errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e))
+ }
+
+ return nil
+}
+
+func (w *ContextGraphWalker) init() {
+ w.contexts = make(map[string]*BuiltinEvalContext, 5)
+ w.providerCache = make(map[string]ResourceProvider, 5)
+ w.providerConfigCache = make(map[string]*ResourceConfig, 5)
+ w.provisionerCache = make(map[string]ResourceProvisioner, 5)
+ w.interpolaterVars = make(map[string]map[string]interface{}, 5)
+}
 diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
new file mode 100644
index 00000000..3fb37481
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
@@ -0,0 +1,18 @@
+package terraform
+
+//go:generate stringer -type=walkOperation graph_walk_operation.go
+
+// walkOperation is an enum which tells the walkContext what to do.
+type walkOperation byte
+
+const (
+ walkInvalid walkOperation = iota
+ walkInput
+ walkApply
+ walkPlan
+ walkPlanDestroy
+ walkRefresh
+ walkValidate
+ walkDestroy
+ walkImport
+)
 diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go
new file mode 100644
index 00000000..ab11e8ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook.go
@@ -0,0 +1,137 @@
+package terraform
+
+// HookAction is an enum of actions that can be taken as a result of a hook
+// callback. This allows you to modify the behavior of Terraform at runtime.
+type HookAction byte
+
+const (
+ // HookActionContinue continues with processing as usual.
+ HookActionContinue HookAction = iota
+
+ // HookActionHalt halts immediately: no more hooks are processed
+ // and the action that Terraform was about to take is cancelled.
+ HookActionHalt
+)
+
+// Hook is the interface that must be implemented to hook into various
+// parts of Terraform, allowing you to inspect or change behavior at runtime.
+//
+// There are MANY hook points into Terraform. If you only want to implement
+// some hook points, but not all (which is the likely case), then embed the
+// NilHook into your struct, which implements all of the interface but does
+// nothing. Then, override only the functions you want to implement.
+type Hook interface {
+ // PreApply and PostApply are called before and after a single
+ // resource is applied. The error argument in PostApply is the
+ // error, if any, that was returned from the provider Apply call itself.
+ PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error)
+ PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error)
+
+ // PreDiff and PostDiff are called before and after a single
+ // resource is diffed.
+ PreDiff(*InstanceInfo, *InstanceState) (HookAction, error)
+ PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error)
+
+ // Provisioning hooks
+ //
+ // All should be self-explanatory. ProvisionOutput is called with
+ // output sent back by the provisioners. This will be called multiple
+ // times as output comes in, but each call should represent a line of
+ // output. The ProvisionOutput method cannot control whether the
+ // hook continues running.
+ PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
+ PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
+ PreProvision(*InstanceInfo, string) (HookAction, error)
+ PostProvision(*InstanceInfo, string, error) (HookAction, error)
+ ProvisionOutput(*InstanceInfo, string, string)
+
+ // PreRefresh and PostRefresh are called before and after a single
+ // resource state is refreshed, respectively.
+ PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
+ PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
+
+ // PostStateUpdate is called after the state is updated.
+ PostStateUpdate(*State) (HookAction, error)
+
+ // PreImportState and PostImportState are called before and after
+ // a single resource's state is imported.
+ PreImportState(*InstanceInfo, string) (HookAction, error)
+ PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error)
+}
+
+// NilHook is a Hook implementation that does nothing. It exists only to
+// simplify implementing hooks. You can embed this into your Hook implementation
+// and only implement the functions you are interested in.
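+//
+// For example (illustrative only; applyLogger is not part of this file),
+// a hook that only observes applies:
+//
+//	type applyLogger struct{ NilHook }
+//
+//	func (h *applyLogger) PreApply(i *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) {
+//		log.Printf("applying %s", i.Id)
+//		return HookActionContinue, nil
+//	}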
+type NilHook struct{} + +func (*NilHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) ProvisionOutput( + *InstanceInfo, string, string) { +} + +func (*NilHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreImportState(*InstanceInfo, string) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostStateUpdate(*State) (HookAction, error) { + return HookActionContinue, nil +} + +// handleHook turns hook actions into panics. This lets you use the +// panic/recover mechanism in Go as a flow control mechanism for hook +// actions. +func handleHook(a HookAction, err error) { + if err != nil { + // TODO: handle errors + } + + switch a { + case HookActionContinue: + return + case HookActionHalt: + panic(HookActionHalt) + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go new file mode 100644 index 00000000..0e464006 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go @@ -0,0 +1,245 @@ +package terraform + +import "sync" + +// MockHook is an implementation of Hook that can be used for tests. +// It records all of its function calls. 
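+//
+// Typical test usage (illustrative only):
+//
+//	h := new(MockHook)
+//	// ... run the operation under test with h registered as a hook ...
+//	if !h.PreApplyCalled {
+//		t.Fatal("expected PreApply to be called")
+//	}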
+type MockHook struct { + sync.Mutex + + PreApplyCalled bool + PreApplyInfo *InstanceInfo + PreApplyDiff *InstanceDiff + PreApplyState *InstanceState + PreApplyReturn HookAction + PreApplyError error + + PostApplyCalled bool + PostApplyInfo *InstanceInfo + PostApplyState *InstanceState + PostApplyError error + PostApplyReturn HookAction + PostApplyReturnError error + PostApplyFn func(*InstanceInfo, *InstanceState, error) (HookAction, error) + + PreDiffCalled bool + PreDiffInfo *InstanceInfo + PreDiffState *InstanceState + PreDiffReturn HookAction + PreDiffError error + + PostDiffCalled bool + PostDiffInfo *InstanceInfo + PostDiffDiff *InstanceDiff + PostDiffReturn HookAction + PostDiffError error + + PreProvisionResourceCalled bool + PreProvisionResourceInfo *InstanceInfo + PreProvisionInstanceState *InstanceState + PreProvisionResourceReturn HookAction + PreProvisionResourceError error + + PostProvisionResourceCalled bool + PostProvisionResourceInfo *InstanceInfo + PostProvisionInstanceState *InstanceState + PostProvisionResourceReturn HookAction + PostProvisionResourceError error + + PreProvisionCalled bool + PreProvisionInfo *InstanceInfo + PreProvisionProvisionerId string + PreProvisionReturn HookAction + PreProvisionError error + + PostProvisionCalled bool + PostProvisionInfo *InstanceInfo + PostProvisionProvisionerId string + PostProvisionErrorArg error + PostProvisionReturn HookAction + PostProvisionError error + + ProvisionOutputCalled bool + ProvisionOutputInfo *InstanceInfo + ProvisionOutputProvisionerId string + ProvisionOutputMessage string + + PostRefreshCalled bool + PostRefreshInfo *InstanceInfo + PostRefreshState *InstanceState + PostRefreshReturn HookAction + PostRefreshError error + + PreRefreshCalled bool + PreRefreshInfo *InstanceInfo + PreRefreshState *InstanceState + PreRefreshReturn HookAction + PreRefreshError error + + PreImportStateCalled bool + PreImportStateInfo *InstanceInfo + PreImportStateId string + PreImportStateReturn HookAction + PreImportStateError error + + PostImportStateCalled bool + PostImportStateInfo *InstanceInfo + PostImportStateState []*InstanceState + PostImportStateReturn HookAction + PostImportStateError error + + PostStateUpdateCalled bool + PostStateUpdateState *State + PostStateUpdateReturn HookAction + PostStateUpdateError error +} + +func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreApplyCalled = true + h.PreApplyInfo = n + h.PreApplyDiff = d + h.PreApplyState = s + return h.PreApplyReturn, h.PreApplyError +} + +func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostApplyCalled = true + h.PostApplyInfo = n + h.PostApplyState = s + h.PostApplyError = e + + if h.PostApplyFn != nil { + return h.PostApplyFn(n, s, e) + } + + return h.PostApplyReturn, h.PostApplyReturnError +} + +func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreDiffCalled = true + h.PreDiffInfo = n + h.PreDiffState = s + return h.PreDiffReturn, h.PreDiffError +} + +func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostDiffCalled = true + h.PostDiffInfo = n + h.PostDiffDiff = d + return h.PostDiffReturn, h.PostDiffError +} + +func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + + 
h.PreProvisionResourceCalled = true + h.PreProvisionResourceInfo = n + h.PreProvisionInstanceState = s + return h.PreProvisionResourceReturn, h.PreProvisionResourceError +} + +func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostProvisionResourceCalled = true + h.PostProvisionResourceInfo = n + h.PostProvisionInstanceState = s + return h.PostProvisionResourceReturn, h.PostProvisionResourceError +} + +func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreProvisionCalled = true + h.PreProvisionInfo = n + h.PreProvisionProvisionerId = provId + return h.PreProvisionReturn, h.PreProvisionError +} + +func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostProvisionCalled = true + h.PostProvisionInfo = n + h.PostProvisionProvisionerId = provId + h.PostProvisionErrorArg = err + return h.PostProvisionReturn, h.PostProvisionError +} + +func (h *MockHook) ProvisionOutput( + n *InstanceInfo, + provId string, + msg string) { + h.Lock() + defer h.Unlock() + + h.ProvisionOutputCalled = true + h.ProvisionOutputInfo = n + h.ProvisionOutputProvisionerId = provId + h.ProvisionOutputMessage = msg +} + +func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreRefreshCalled = true + h.PreRefreshInfo = n + h.PreRefreshState = s + return h.PreRefreshReturn, h.PreRefreshError +} + +func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostRefreshCalled = true + h.PostRefreshInfo = n + h.PostRefreshState = s + return h.PostRefreshReturn, h.PostRefreshError +} + +func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreImportStateCalled = true + h.PreImportStateInfo = info + h.PreImportStateId = id + return h.PreImportStateReturn, h.PreImportStateError +} + +func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostImportStateCalled = true + h.PostImportStateInfo = info + h.PostImportStateState = s + return h.PostImportStateReturn, h.PostImportStateError +} + +func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostStateUpdateCalled = true + h.PostStateUpdateState = s + return h.PostStateUpdateReturn, h.PostStateUpdateError +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go new file mode 100644 index 00000000..104d0098 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go @@ -0,0 +1,87 @@ +package terraform + +import ( + "sync/atomic" +) + +// stopHook is a private Hook implementation that Terraform uses to +// signal when to stop or cancel actions. 
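+//
+// Stop flips an atomic flag; until Reset is called, every callback then
+// returns HookActionHalt. For example (illustrative only):
+//
+//	h := new(stopHook)
+//	h.Stop()
+//	a, _ := h.PreApply(nil, nil, nil) // a == HookActionHalt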
+type stopHook struct { + stop uint32 +} + +func (h *stopHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) ProvisionOutput(*InstanceInfo, string, string) { +} + +func (h *stopHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreImportState(*InstanceInfo, string) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostStateUpdate(*State) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) hook() (HookAction, error) { + if h.Stopped() { + return HookActionHalt, nil + } + + return HookActionContinue, nil +} + +// reset should be called within the lock context +func (h *stopHook) Reset() { + atomic.StoreUint32(&h.stop, 0) +} + +func (h *stopHook) Stop() { + atomic.StoreUint32(&h.stop, 1) +} + +func (h *stopHook) Stopped() bool { + return atomic.LoadUint32(&h.stop) == 1 +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go new file mode 100644 index 00000000..08959717 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go @@ -0,0 +1,13 @@ +package terraform + +//go:generate stringer -type=InstanceType instancetype.go + +// InstanceType is an enum of the various types of instances store in the State +type InstanceType int + +const ( + TypeInvalid InstanceType = iota + TypePrimary + TypeTainted + TypeDeposed +) diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go new file mode 100644 index 00000000..f69267cd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT. 
+ +package terraform + +import "fmt" + +const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" + +var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} + +func (i InstanceType) String() string { + if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { + return fmt.Sprintf("InstanceType(%d)", i) + } + return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go new file mode 100644 index 00000000..19dcf21f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go @@ -0,0 +1,782 @@ +package terraform + +import ( + "fmt" + "log" + "os" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/hil" + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/flatmap" +) + +const ( + // VarEnvPrefix is the prefix of variables that are read from + // the environment to set variables here. + VarEnvPrefix = "TF_VAR_" +) + +// Interpolater is the structure responsible for determining the values +// for interpolations such as `aws_instance.foo.bar`. +type Interpolater struct { + Operation walkOperation + Meta *ContextMeta + Module *module.Tree + State *State + StateLock *sync.RWMutex + VariableValues map[string]interface{} + VariableValuesLock *sync.Mutex +} + +// InterpolationScope is the current scope of execution. This is required +// since some variables which are interpolated are dependent on what we're +// operating on and where we are. +type InterpolationScope struct { + Path []string + Resource *Resource +} + +// Values returns the values for all the variables in the given map. 
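+//
+// A minimal call sketch (scope and vars are assumed to be supplied by
+// the caller):
+//
+//	vals, err := i.Values(&InterpolationScope{Path: RootModulePath}, vars)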
+func (i *Interpolater) Values( + scope *InterpolationScope, + vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) { + if scope == nil { + scope = &InterpolationScope{} + } + + result := make(map[string]ast.Variable, len(vars)) + + // Copy the default variables + if i.Module != nil && scope != nil { + mod := i.Module + if len(scope.Path) > 1 { + mod = i.Module.Child(scope.Path[1:]) + } + for _, v := range mod.Config().Variables { + // Set default variables + if v.Default == nil { + continue + } + + n := fmt.Sprintf("var.%s", v.Name) + variable, err := hil.InterfaceToVariable(v.Default) + if err != nil { + return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default) + } + + result[n] = variable + } + } + + for n, rawV := range vars { + var err error + switch v := rawV.(type) { + case *config.CountVariable: + err = i.valueCountVar(scope, n, v, result) + case *config.ModuleVariable: + err = i.valueModuleVar(scope, n, v, result) + case *config.PathVariable: + err = i.valuePathVar(scope, n, v, result) + case *config.ResourceVariable: + err = i.valueResourceVar(scope, n, v, result) + case *config.SelfVariable: + err = i.valueSelfVar(scope, n, v, result) + case *config.SimpleVariable: + err = i.valueSimpleVar(scope, n, v, result) + case *config.TerraformVariable: + err = i.valueTerraformVar(scope, n, v, result) + case *config.UserVariable: + err = i.valueUserVar(scope, n, v, result) + default: + err = fmt.Errorf("%s: unknown variable type: %T", n, rawV) + } + + if err != nil { + return nil, err + } + } + + return result, nil +} + +func (i *Interpolater) valueCountVar( + scope *InterpolationScope, + n string, + v *config.CountVariable, + result map[string]ast.Variable) error { + switch v.Type { + case config.CountValueIndex: + if scope.Resource == nil { + return fmt.Errorf("%s: count.index is only valid within resources", n) + } + result[n] = ast.Variable{ + Value: scope.Resource.CountIndex, + Type: ast.TypeInt, + } + return nil + default: + return fmt.Errorf("%s: unknown count type: %#v", n, v.Type) + } +} + +func unknownVariable() ast.Variable { + return ast.Variable{ + Type: ast.TypeUnknown, + Value: config.UnknownVariableValue, + } +} + +func unknownValue() string { + return hil.UnknownValue +} + +func (i *Interpolater) valueModuleVar( + scope *InterpolationScope, + n string, + v *config.ModuleVariable, + result map[string]ast.Variable) error { + + // Build the path to the child module we want + path := make([]string, len(scope.Path), len(scope.Path)+1) + copy(path, scope.Path) + path = append(path, v.Name) + + // Grab the lock so that if other interpolations are running or + // state is being modified, we'll be safe. + i.StateLock.RLock() + defer i.StateLock.RUnlock() + + // Get the module where we're looking for the value + mod := i.State.ModuleByPath(path) + if mod == nil { + // If the module doesn't exist, then we can return an empty string. + // This happens usually only in Refresh() when we haven't populated + // a state. During validation, we semantically verify that all + // modules reference other modules, and graph ordering should + // ensure that the module is in the state, so if we reach this + // point otherwise it really is a panic. 
+ result[n] = unknownVariable() + + // During apply this is always an error + if i.Operation == walkApply { + return fmt.Errorf( + "Couldn't find module %q for var: %s", + v.Name, v.FullKey()) + } + } else { + // Get the value from the outputs + if outputState, ok := mod.Outputs[v.Field]; ok { + output, err := hil.InterfaceToVariable(outputState.Value) + if err != nil { + return err + } + result[n] = output + } else { + // Same reasons as the comment above. + result[n] = unknownVariable() + + // During apply this is always an error + if i.Operation == walkApply { + return fmt.Errorf( + "Couldn't find output %q for module var: %s", + v.Field, v.FullKey()) + } + } + } + + return nil +} + +func (i *Interpolater) valuePathVar( + scope *InterpolationScope, + n string, + v *config.PathVariable, + result map[string]ast.Variable) error { + switch v.Type { + case config.PathValueCwd: + wd, err := os.Getwd() + if err != nil { + return fmt.Errorf( + "Couldn't get cwd for var %s: %s", + v.FullKey(), err) + } + + result[n] = ast.Variable{ + Value: wd, + Type: ast.TypeString, + } + case config.PathValueModule: + if t := i.Module.Child(scope.Path[1:]); t != nil { + result[n] = ast.Variable{ + Value: t.Config().Dir, + Type: ast.TypeString, + } + } + case config.PathValueRoot: + result[n] = ast.Variable{ + Value: i.Module.Config().Dir, + Type: ast.TypeString, + } + default: + return fmt.Errorf("%s: unknown path type: %#v", n, v.Type) + } + + return nil + +} + +func (i *Interpolater) valueResourceVar( + scope *InterpolationScope, + n string, + v *config.ResourceVariable, + result map[string]ast.Variable) error { + // If we're computing all dynamic fields, then module vars count + // and we mark it as computed. + if i.Operation == walkValidate { + result[n] = unknownVariable() + return nil + } + + var variable *ast.Variable + var err error + + if v.Multi && v.Index == -1 { + variable, err = i.computeResourceMultiVariable(scope, v) + } else { + variable, err = i.computeResourceVariable(scope, v) + } + + if err != nil { + return err + } + + if variable == nil { + // During the input walk we tolerate missing variables because + // we haven't yet had a chance to refresh state, so dynamic data may + // not yet be complete. + // If it truly is missing, we'll catch it on a later walk. + // This applies only to graph nodes that interpolate during the + // config walk, e.g. providers. + if i.Operation == walkInput || i.Operation == walkRefresh { + result[n] = unknownVariable() + return nil + } + + return fmt.Errorf("variable %q is nil, but no error was reported", v.Name) + } + + result[n] = *variable + return nil +} + +func (i *Interpolater) valueSelfVar( + scope *InterpolationScope, + n string, + v *config.SelfVariable, + result map[string]ast.Variable) error { + if scope == nil || scope.Resource == nil { + return fmt.Errorf( + "%s: invalid scope, self variables are only valid on resources", n) + } + + rv, err := config.NewResourceVariable(fmt.Sprintf( + "%s.%s.%d.%s", + scope.Resource.Type, + scope.Resource.Name, + scope.Resource.CountIndex, + v.Field)) + if err != nil { + return err + } + + return i.valueResourceVar(scope, n, rv, result) +} + +func (i *Interpolater) valueSimpleVar( + scope *InterpolationScope, + n string, + v *config.SimpleVariable, + result map[string]ast.Variable) error { + // This error message includes some information for people who + // relied on this for their template_file data sources. We should + // remove this at some point but there isn't any rush. 
+ return fmt.Errorf( + "invalid variable syntax: %q. Did you mean 'var.%s'? If this is part of inline `template` parameter\n"+ + "then you must escape the interpolation with two dollar signs. For\n"+ + "example: ${a} becomes $${a}.", + n, n) +} + +func (i *Interpolater) valueTerraformVar( + scope *InterpolationScope, + n string, + v *config.TerraformVariable, + result map[string]ast.Variable) error { + if v.Field != "env" { + return fmt.Errorf( + "%s: only supported key for 'terraform.X' interpolations is 'env'", n) + } + + if i.Meta == nil { + return fmt.Errorf( + "%s: internal error: nil Meta. Please report a bug.", n) + } + + result[n] = ast.Variable{Type: ast.TypeString, Value: i.Meta.Env} + return nil +} + +func (i *Interpolater) valueUserVar( + scope *InterpolationScope, + n string, + v *config.UserVariable, + result map[string]ast.Variable) error { + i.VariableValuesLock.Lock() + defer i.VariableValuesLock.Unlock() + val, ok := i.VariableValues[v.Name] + if ok { + varValue, err := hil.InterfaceToVariable(val) + if err != nil { + return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s", + v.Name, val, err) + } + result[n] = varValue + return nil + } + + if _, ok := result[n]; !ok && i.Operation == walkValidate { + result[n] = unknownVariable() + return nil + } + + // Look up if we have any variables with this prefix because + // those are map overrides. Include those. + for k, val := range i.VariableValues { + if strings.HasPrefix(k, v.Name+".") { + keyComponents := strings.Split(k, ".") + overrideKey := keyComponents[len(keyComponents)-1] + + mapInterface, ok := result["var."+v.Name] + if !ok { + return fmt.Errorf("override for non-existent variable: %s", v.Name) + } + + mapVariable := mapInterface.Value.(map[string]ast.Variable) + + varValue, err := hil.InterfaceToVariable(val) + if err != nil { + return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s", + v.Name, val, err) + } + mapVariable[overrideKey] = varValue + } + } + + return nil +} + +func (i *Interpolater) computeResourceVariable( + scope *InterpolationScope, + v *config.ResourceVariable) (*ast.Variable, error) { + id := v.ResourceId() + if v.Multi { + id = fmt.Sprintf("%s.%d", id, v.Index) + } + + i.StateLock.RLock() + defer i.StateLock.RUnlock() + + unknownVariable := unknownVariable() + + // These variables must be declared early because of the use of GOTO + var isList bool + var isMap bool + + // Get the information about this resource variable, and verify + // that it exists and such. + module, cr, err := i.resourceVariableInfo(scope, v) + if err != nil { + return nil, err + } + + // If we're requesting "count" its a special variable that we grab + // directly from the config itself. + if v.Field == "count" { + var count int + if cr != nil { + count, err = cr.Count() + } else { + count, err = i.resourceCountMax(module, cr, v) + } + if err != nil { + return nil, fmt.Errorf( + "Error reading %s count: %s", + v.ResourceId(), + err) + } + + return &ast.Variable{Type: ast.TypeInt, Value: count}, nil + } + + // Get the resource out from the state. We know the state exists + // at this point and if there is a state, we expect there to be a + // resource with the given name. 
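+ // (Editor's examples) The id computed above looks like "aws_instance.web" + // for a single resource, or "aws_instance.web.3" for index 3 of a counted + // resource, matching the keys that module state uses for resources.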
+ var r *ResourceState + if module != nil && len(module.Resources) > 0 { + var ok bool + r, ok = module.Resources[id] + if !ok && v.Multi && v.Index == 0 { + r, ok = module.Resources[v.ResourceId()] + } + if !ok { + r = nil + } + } + if r == nil || r.Primary == nil { + if i.Operation == walkApply || i.Operation == walkPlan { + return nil, fmt.Errorf( + "Resource '%s' not found for variable '%s'", + v.ResourceId(), + v.FullKey()) + } + + // If we have no module in the state yet or count, return empty. + // NOTE(@mitchellh): I actually don't know why this is here. During + // a refactor I kept this here to maintain the same behavior, but + // I'm not sure why it's here. + if module == nil || len(module.Resources) == 0 { + return nil, nil + } + + goto MISSING + } + + if attr, ok := r.Primary.Attributes[v.Field]; ok { + v, err := hil.InterfaceToVariable(attr) + return &v, err + } + + // computed list or map attribute + _, isList = r.Primary.Attributes[v.Field+".#"] + _, isMap = r.Primary.Attributes[v.Field+".%"] + if isList || isMap { + variable, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes) + return &variable, err + } + + // At apply time, we can't do the "maybe has it" check below + // that we need for plans since parent elements might be computed. + // Therefore, it is an error and we're missing the key. + // + // TODO: test by creating a state and configuration that is referencing + // a non-existent variable "foo.bar" where the state only has "foo" + // and verify plan works, but apply doesn't. + if i.Operation == walkApply || i.Operation == walkDestroy { + goto MISSING + } + + // We didn't find the exact field, so let's separate the dots + // and see if anything along the way is a computed set. i.e. if + // we have "foo.0.bar" as the field, check to see if "foo" is + // a computed list. If so, then the whole thing is computed. + if parts := strings.Split(v.Field, "."); len(parts) > 1 { + for i := 1; i < len(parts); i++ { + // Lists and sets are stored with a ".#" count suffix + key := fmt.Sprintf("%s.#", strings.Join(parts[:i], ".")) + if attr, ok := r.Primary.Attributes[key]; ok { + v, err := hil.InterfaceToVariable(attr) + return &v, err + } + + // Maps are stored under the prefix itself + key = strings.Join(parts[:i], ".") + if attr, ok := r.Primary.Attributes[key]; ok { + v, err := hil.InterfaceToVariable(attr) + return &v, err + } + } + } + +MISSING: + // Validation for missing interpolations should happen at a higher + // semantic level. If we reached this point and don't have variables, + // just return the computed value. + if scope == nil || scope.Resource == nil { + return &unknownVariable, nil + } + + // If the operation is refresh, it isn't an error for a value to + // be unknown. Instead, we return that the value is computed so + // that the graph can continue to refresh other nodes. It doesn't + // matter because the config isn't interpolated anyway. + // + // For a Destroy, we're also fine with computed values, since our goal is + // only to get destroy nodes for existing resources. + // + // For an input walk, computed values are okay to return because we're only + // looking for missing variables to prompt the user for.
+ if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput { + return &unknownVariable, nil + } + + return nil, fmt.Errorf( + "Resource '%s' does not have attribute '%s' "+ + "for variable '%s'", + id, + v.Field, + v.FullKey()) +} + +func (i *Interpolater) computeResourceMultiVariable( + scope *InterpolationScope, + v *config.ResourceVariable) (*ast.Variable, error) { + i.StateLock.RLock() + defer i.StateLock.RUnlock() + + unknownVariable := unknownVariable() + + // If we're only looking for input, we don't need to expand a + // multi-variable. This prevents us from encountering things that should be + // known but aren't because the state has yet to be refreshed. + if i.Operation == walkInput { + return &unknownVariable, nil + } + + // Get the information about this resource variable, and verify + // that it exists and such. + module, cr, err := i.resourceVariableInfo(scope, v) + if err != nil { + return nil, err + } + + // Get the keys for all the resources that are created for this resource + countMax, err := i.resourceCountMax(module, cr, v) + if err != nil { + return nil, err + } + + // If count is zero, we return an empty list + if countMax == 0 { + return &ast.Variable{Type: ast.TypeList, Value: []ast.Variable{}}, nil + } + + // If we have no module in the state yet or count, return unknown + if module == nil || len(module.Resources) == 0 { + return &unknownVariable, nil + } + + var values []interface{} + for idx := 0; idx < countMax; idx++ { + id := fmt.Sprintf("%s.%d", v.ResourceId(), idx) + + // ID doesn't have a trailing index. We try both here, but if a value + // without a trailing index is found we prefer that. This choice + // is for legacy reasons: older versions of TF preferred it. + if id == v.ResourceId()+".0" { + potential := v.ResourceId() + if _, ok := module.Resources[potential]; ok { + id = potential + } + } + + r, ok := module.Resources[id] + if !ok { + continue + } + + if r.Primary == nil { + continue + } + + if singleAttr, ok := r.Primary.Attributes[v.Field]; ok { + values = append(values, singleAttr) + continue + } + + // computed list or map attribute + _, isList := r.Primary.Attributes[v.Field+".#"] + _, isMap := r.Primary.Attributes[v.Field+".%"] + if !(isList || isMap) { + continue + } + multiAttr, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes) + if err != nil { + return nil, err + } + + values = append(values, multiAttr) + } + + if len(values) == 0 { + // If the operation is refresh, it isn't an error for a value to + // be unknown. Instead, we return that the value is computed so + // that the graph can continue to refresh other nodes. It doesn't + // matter because the config isn't interpolated anyways. + // + // For a Destroy, we're also fine with computed values, since our goal is + // only to get destroy nodes for existing resources. + // + // For an input walk, computed values are okay to return because we're only + // looking for missing variables to prompt the user for. 
+ if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput { + return &unknownVariable, nil + } + + return nil, fmt.Errorf( + "Resource '%s' does not have attribute '%s' "+ + "for variable '%s'", + v.ResourceId(), + v.Field, + v.FullKey()) + } + + variable, err := hil.InterfaceToVariable(values) + return &variable, err +} + +func (i *Interpolater) interpolateComplexTypeAttribute( + resourceID string, + attributes map[string]string) (ast.Variable, error) { + + // We can now distinguish between lists and maps in state by the count field: + // - lists (and by extension, sets) use the traditional .# notation + // - maps use the newer .% notation + // Consequently here we can decide how to deal with the keys appropriately + // based on whether the type is a map or a list. + if lengthAttr, isList := attributes[resourceID+".#"]; isList { + log.Printf("[DEBUG] Interpolating computed list element attribute %s (%s)", + resourceID, lengthAttr) + + // In Terraform's internal dotted representation of list-like attributes, the + // ".#" count field is marked as unknown to indicate "this whole list is + // unknown". We must honor that meaning here so computed references can be + // treated properly during the plan phase. + if lengthAttr == config.UnknownVariableValue { + return unknownVariable(), nil + } + + expanded := flatmap.Expand(attributes, resourceID) + return hil.InterfaceToVariable(expanded) + } + + if lengthAttr, isMap := attributes[resourceID+".%"]; isMap { + log.Printf("[DEBUG] Interpolating computed map element attribute %s (%s)", + resourceID, lengthAttr) + + // In Terraform's internal dotted representation of map attributes, the + // ".%" count field is marked as unknown to indicate "this whole map is + // unknown". We must honor that meaning here so computed references can be + // treated properly during the plan phase. + if lengthAttr == config.UnknownVariableValue { + return unknownVariable(), nil + } + + expanded := flatmap.Expand(attributes, resourceID) + return hil.InterfaceToVariable(expanded) + } + + return ast.Variable{}, fmt.Errorf("No complex type %s found", resourceID) +} + +func (i *Interpolater) resourceVariableInfo( + scope *InterpolationScope, + v *config.ResourceVariable) (*ModuleState, *config.Resource, error) { + // Get the module tree that contains our current path. This is + // either the current module (path is empty) or a child. + modTree := i.Module + if len(scope.Path) > 1 { + modTree = i.Module.Child(scope.Path[1:]) + } + + // Get the resource from the configuration so we can verify + // that the resource is in the configuration and so we can access + // the configuration if we need to. + var cr *config.Resource + for _, r := range modTree.Config().Resources { + if r.Id() == v.ResourceId() { + cr = r + break + } + } + + // Get the relevant module + module := i.State.ModuleByPath(scope.Path) + return module, cr, nil +} + +func (i *Interpolater) resourceCountMax( + ms *ModuleState, + cr *config.Resource, + v *config.ResourceVariable) (int, error) { + id := v.ResourceId() + + // If we're NOT applying, then we assume we can read the count + // from the config. Plan and so on may not have any state yet so + // we do a full interpolation. + if i.Operation != walkApply { + if cr == nil { + return 0, nil + } + + count, err := cr.Count() + if err != nil { + return 0, err + } + + return count, nil + } + + // We need to determine the list of resource keys to get values from.
+ // This needs to be sorted so the order is deterministic. We used to + // use "cr.Count()" but that doesn't work if the count is interpolated + // and we can't guarantee that so we instead depend on the state. + max := -1 + for k, _ := range ms.Resources { + // Get the index number for this resource + index := "" + if k == id { + // If the key is the id, then its just 0 (no explicit index) + index = "0" + } else if strings.HasPrefix(k, id+".") { + // Grab the index number out of the state + index = k[len(id+"."):] + if idx := strings.IndexRune(index, '.'); idx >= 0 { + index = index[:idx] + } + } + + // If there was no index then this resource didn't match + // the one we're looking for, exit. + if index == "" { + continue + } + + // Turn the index into an int + raw, err := strconv.ParseInt(index, 0, 0) + if err != nil { + return 0, fmt.Errorf( + "%s: error parsing index %q as int: %s", + id, index, err) + } + + // Keep track of this index if its the max + if new := int(raw); new > max { + max = new + } + } + + // If we never found any matching resources in the state, we + // have zero. + if max == -1 { + return 0, nil + } + + // The result value is "max+1" because we're returning the + // max COUNT, not the max INDEX, and we zero-index. + return max + 1, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go new file mode 100644 index 00000000..ca99685a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/path.go @@ -0,0 +1,24 @@ +package terraform + +import ( + "crypto/md5" + "encoding/hex" +) + +// PathCacheKey returns a cache key for a module path. +// +// TODO: test +func PathCacheKey(path []string) string { + // There is probably a better way to do this, but this is working for now. + // We just create an MD5 hash of all the MD5 hashes of all the path + // elements. This gets us the property that it is unique per ordering. + hash := md5.New() + for _, p := range path { + single := md5.Sum([]byte(p)) + if _, err := hash.Write(single[:]); err != nil { + panic(err) + } + } + + return hex.EncodeToString(hash.Sum(nil)) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go new file mode 100644 index 00000000..ea088450 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go @@ -0,0 +1,153 @@ +package terraform + +import ( + "bytes" + "encoding/gob" + "errors" + "fmt" + "io" + "sync" + + "github.com/hashicorp/terraform/config/module" +) + +func init() { + gob.Register(make([]interface{}, 0)) + gob.Register(make([]map[string]interface{}, 0)) + gob.Register(make(map[string]interface{})) + gob.Register(make(map[string]string)) +} + +// Plan represents a single Terraform execution plan, which contains +// all the information necessary to make an infrastructure change. +// +// A plan has to contain basically the entire state of the world +// necessary to make a change: the state, diff, config, backend config, etc. +// This is so that it can run alone without any other data. +type Plan struct { + Diff *Diff + Module *module.Tree + State *State + Vars map[string]interface{} + Targets []string + + // Backend is the backend that this plan should use and store data with. + Backend *BackendState + + once sync.Once +} + +// Context returns a Context with the data encapsulated in this plan. +// +// The following fields in opts are overridden by the plan: Config, +// Diff, State, Variables. 
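+// +// A hedged usage sketch (editor's illustration; the file name and error +// handling are placeholders): +// +// f, _ := os.Open("saved.tfplan") +// plan, err := ReadPlan(f) +// if err != nil { /* handle */ } +// ctx, err := plan.Context(&ContextOpts{})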
+func (p *Plan) Context(opts *ContextOpts) (*Context, error) { + opts.Diff = p.Diff + opts.Module = p.Module + opts.State = p.State + opts.Targets = p.Targets + + opts.Variables = make(map[string]interface{}) + for k, v := range p.Vars { + opts.Variables[k] = v + } + + return NewContext(opts) +} + +func (p *Plan) String() string { + buf := new(bytes.Buffer) + buf.WriteString("DIFF:\n\n") + buf.WriteString(p.Diff.String()) + buf.WriteString("\n\nSTATE:\n\n") + buf.WriteString(p.State.String()) + return buf.String() +} + +func (p *Plan) init() { + p.once.Do(func() { + if p.Diff == nil { + p.Diff = new(Diff) + p.Diff.init() + } + + if p.State == nil { + p.State = new(State) + p.State.init() + } + + if p.Vars == nil { + p.Vars = make(map[string]interface{}) + } + }) +} + +// The format byte is prefixed into the plan file format so that we have +// the ability in the future to change the file format if we want for any +// reason. +const planFormatMagic = "tfplan" +const planFormatVersion byte = 1 + +// ReadPlan reads a plan structure out of a reader in the format that +// was written by WritePlan. +func ReadPlan(src io.Reader) (*Plan, error) { + var result *Plan + var err error + n := 0 + + // Verify the magic bytes + magic := make([]byte, len(planFormatMagic)) + for n < len(magic) { + n, err = src.Read(magic[n:]) + if err != nil { + return nil, fmt.Errorf("error while reading magic bytes: %s", err) + } + } + if string(magic) != planFormatMagic { + return nil, fmt.Errorf("not a valid plan file") + } + + // Verify the version is something we can read + var formatByte [1]byte + n, err = src.Read(formatByte[:]) + if err != nil { + return nil, err + } + if n != len(formatByte) { + return nil, errors.New("failed to read plan version byte") + } + + if formatByte[0] != planFormatVersion { + return nil, fmt.Errorf("unknown plan file version: %d", formatByte[0]) + } + + dec := gob.NewDecoder(src) + if err := dec.Decode(&result); err != nil { + return nil, err + } + + return result, nil +} + +// WritePlan writes a plan somewhere in a binary format. +func WritePlan(d *Plan, dst io.Writer) error { + // Write the magic bytes so we can determine the file format later + n, err := dst.Write([]byte(planFormatMagic)) + if err != nil { + return err + } + if n != len(planFormatMagic) { + return errors.New("failed to write plan format magic bytes") + } + + // Write a version byte so we can iterate on version at some point + n, err = dst.Write([]byte{planFormatVersion}) + if err != nil { + return err + } + if n != 1 { + return errors.New("failed to write plan version byte") + } + + return gob.NewEncoder(dst).Encode(d) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go new file mode 100644 index 00000000..0acf0beb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go @@ -0,0 +1,360 @@ +package terraform + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/terraform/config" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/reflectwalk" +) + +// ResourceProvisionerConfig is used to pair a provisioner +// with its provided configuration. This allows us to use singleton +// instances of each ResourceProvisioner and to keep the relevant +// configuration instead of instantiating a new Provisioner for each +// resource. 
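+// +// A sketch of the pairing this type describes (editor's illustration; the +// values are hypothetical): +// +// rpc := &ResourceProvisionerConfig{ +// Type: "remote-exec", +// Provisioner: p, // a shared singleton ResourceProvisioner +// RawConfig: rc, +// }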
+type ResourceProvisionerConfig struct { + Type string + Provisioner ResourceProvisioner + Config *ResourceConfig + RawConfig *config.RawConfig + ConnInfo *config.RawConfig +} + +// Resource encapsulates a resource, its configuration, its provider, +// its current state, and potentially a desired diff from the state it +// wants to reach. +type Resource struct { + // These are all used by the new EvalNode stuff. + Name string + Type string + CountIndex int + + // These aren't really used anymore anywhere, but we keep them around + // since we haven't done a proper cleanup yet. + Id string + Info *InstanceInfo + Config *ResourceConfig + Dependencies []string + Diff *InstanceDiff + Provider ResourceProvider + State *InstanceState + Provisioners []*ResourceProvisionerConfig + Flags ResourceFlag +} + +// ResourceKind specifies what kind of instance we're working with, whether +// its a primary instance, a tainted instance, or an orphan. +type ResourceFlag byte + +// InstanceInfo is used to hold information about the instance and/or +// resource being modified. +type InstanceInfo struct { + // Id is a unique name to represent this instance. This is not related + // to InstanceState.ID in any way. + Id string + + // ModulePath is the complete path of the module containing this + // instance. + ModulePath []string + + // Type is the resource type of this instance + Type string + + // uniqueExtra is an internal field that can be populated to supply + // extra metadata that is used to identify a unique instance in + // the graph walk. This will be appended to HumanID when uniqueId + // is called. + uniqueExtra string +} + +// HumanId is a unique Id that is human-friendly and useful for UI elements. +func (i *InstanceInfo) HumanId() string { + if i == nil { + return "" + } + + if len(i.ModulePath) <= 1 { + return i.Id + } + + return fmt.Sprintf( + "module.%s.%s", + strings.Join(i.ModulePath[1:], "."), + i.Id) +} + +func (i *InstanceInfo) uniqueId() string { + prefix := i.HumanId() + if v := i.uniqueExtra; v != "" { + prefix += " " + v + } + + return prefix +} + +// ResourceConfig holds the configuration given for a resource. This is +// done instead of a raw `map[string]interface{}` type so that rich +// methods can be added to it to make dealing with it easier. +type ResourceConfig struct { + ComputedKeys []string + Raw map[string]interface{} + Config map[string]interface{} + + raw *config.RawConfig +} + +// NewResourceConfig creates a new ResourceConfig from a config.RawConfig. +func NewResourceConfig(c *config.RawConfig) *ResourceConfig { + result := &ResourceConfig{raw: c} + result.interpolateForce() + return result +} + +// DeepCopy performs a deep copy of the configuration. This makes it safe +// to modify any of the structures that are part of the resource config without +// affecting the original configuration. +func (c *ResourceConfig) DeepCopy() *ResourceConfig { + // DeepCopying a nil should return a nil to avoid panics + if c == nil { + return nil + } + + // Copy, this will copy all the exported attributes + copy, err := copystructure.Config{Lock: true}.Copy(c) + if err != nil { + panic(err) + } + + // Force the type + result := copy.(*ResourceConfig) + + // For the raw configuration, we can just use its own copy method + result.raw = c.raw.Copy() + + return result +} + +// Equal checks the equality of two resource configs. 
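+// +// A minimal sketch (editor's illustration): +// +// a := NewResourceConfig(nil) +// b := NewResourceConfig(nil) +// same := a.Equal(b) // true: only the exported fields are compared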
+func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool { + // If either are nil, then they're only equal if they're both nil + if c == nil || c2 == nil { + return c == c2 + } + + // Sort the computed keys so they're deterministic + sort.Strings(c.ComputedKeys) + sort.Strings(c2.ComputedKeys) + + // Two resource configs are equal if their exported properties are equal. + // We don't compare "raw" because it is never used again after + // initialization and for all intents and purposes they are equal + // if the exported properties are equal. + check := [][2]interface{}{ + {c.ComputedKeys, c2.ComputedKeys}, + {c.Raw, c2.Raw}, + {c.Config, c2.Config}, + } + for _, pair := range check { + if !reflect.DeepEqual(pair[0], pair[1]) { + return false + } + } + + return true +} + +// CheckSet checks that the given list of configuration keys is +// properly set. If not, errors are returned for each unset key. +// +// This is useful to be called in the Validate method of a ResourceProvider. +func (c *ResourceConfig) CheckSet(keys []string) []error { + var errs []error + + for _, k := range keys { + if !c.IsSet(k) { + errs = append(errs, fmt.Errorf("%s must be set", k)) + } + } + + return errs +} + +// Get looks up a configuration value by key and returns the value. +// +// The second return value is true if the get was successful. Get will +// return the raw value if the key is computed, so you should pair this +// with IsComputed. +func (c *ResourceConfig) Get(k string) (interface{}, bool) { + // We aim to get a value from the configuration. If it is computed, + // then we return the pure raw value. + source := c.Config + if c.IsComputed(k) { + source = c.Raw + } + + return c.get(k, source) +} + +// GetRaw looks up a configuration value by key and returns the value, +// from the raw, uninterpolated config. +// +// The second return value is true if the get was successful. GetRaw will +// not succeed if the value is being computed. +func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) { + return c.get(k, c.Raw) +} + +// IsComputed returns whether the given key is computed or not. +func (c *ResourceConfig) IsComputed(k string) bool { + // Check the config to see if we get a computed + // value out of it. + v, ok := c.get(k, c.Config) + if !ok { + return false + } + + // If value is nil, then it isn't computed + if v == nil { + return false + } + + // Test if the value contains an unknown value + var w unknownCheckWalker + if err := reflectwalk.Walk(v, &w); err != nil { + panic(err) + } + + return w.Unknown +} + +// IsSet checks if the key in the configuration is set. A key is set if +// it has a value or the value is being computed (is unknown currently). +// +// This function should be used rather than checking the keys of the +// raw configuration itself, since a key may be omitted from the raw +// configuration if it is being computed.
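+// +// For example (editor's sketch; the keys are hypothetical): +// +// c.IsSet("ami") // true if "ami" has a concrete value in Config +// c.IsSet("user_data") // true if "user_data" is still being computed +// c.IsSet("missing") // false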
+func (c *ResourceConfig) IsSet(k string) bool { + if c == nil { + return false + } + + if c.IsComputed(k) { + return true + } + + if _, ok := c.Get(k); ok { + return true + } + + return false +} + +func (c *ResourceConfig) get( + k string, raw map[string]interface{}) (interface{}, bool) { + parts := strings.Split(k, ".") + if len(parts) == 1 && parts[0] == "" { + parts = nil + } + + var current interface{} = raw + var previous interface{} = nil + for i, part := range parts { + if current == nil { + return nil, false + } + + cv := reflect.ValueOf(current) + switch cv.Kind() { + case reflect.Map: + previous = current + v := cv.MapIndex(reflect.ValueOf(part)) + if !v.IsValid() { + if i > 0 && i != (len(parts)-1) { + tryKey := strings.Join(parts[i:], ".") + v := cv.MapIndex(reflect.ValueOf(tryKey)) + if !v.IsValid() { + return nil, false + } + + return v.Interface(), true + } + + return nil, false + } + + current = v.Interface() + case reflect.Slice: + previous = current + + if part == "#" { + // If any value in a list is computed, this whole thing + // is computed and we can't read any part of it. + for i := 0; i < cv.Len(); i++ { + if v := cv.Index(i).Interface(); v == unknownValue() { + return v, true + } + } + + current = cv.Len() + } else { + i, err := strconv.ParseInt(part, 0, 0) + if err != nil { + return nil, false + } + if i >= int64(cv.Len()) { + return nil, false + } + current = cv.Index(int(i)).Interface() + } + case reflect.String: + // This happens when map keys contain "." and have a common + // prefix so were split as path components above. + actualKey := strings.Join(parts[i-1:], ".") + if prevMap, ok := previous.(map[string]interface{}); ok { + v, ok := prevMap[actualKey] + return v, ok + } + + return nil, false + default: + panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) + } + } + + return current, true +} + +// interpolateForce is a temporary thing. We want to get rid of interpolate +// above and likewise this, but it can only be done after the f-ast-graph +// refactor is complete. +func (c *ResourceConfig) interpolateForce() { + if c.raw == nil { + var err error + c.raw, err = config.NewRawConfig(make(map[string]interface{})) + if err != nil { + panic(err) + } + } + + c.ComputedKeys = c.raw.UnknownKeys() + c.Raw = c.raw.RawMap() + c.Config = c.raw.Config() +} + +// unknownCheckWalker +type unknownCheckWalker struct { + Unknown bool +} + +func (w *unknownCheckWalker) Primitive(v reflect.Value) error { + if v.Interface() == unknownValue() { + w.Unknown = true + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go new file mode 100644 index 00000000..a8a0c955 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go @@ -0,0 +1,301 @@ +package terraform + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/terraform/config" +) + +// ResourceAddress is a way of identifying an individual resource (or, +// eventually, a subset of resources) within the state. It is used for Targets. 
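+// +// Example address strings this type models (editor's illustration): +// +// aws_instance.web - a resource in the root module +// aws_instance.web[1] - index 1 of a counted resource +// module.app.aws_instance.web - a resource in the child module "app" +// data.aws_ami.ubuntu - a data resource +// aws_instance.web.tainted[0] - explicit instance type and index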
+type ResourceAddress struct { + // Addresses a resource falling somewhere in the module path + // When specified alone, addresses all resources within a module path + Path []string + + // Addresses a specific resource that occurs in a list + Index int + + InstanceType InstanceType + InstanceTypeSet bool + Name string + Type string + Mode config.ResourceMode // significant only if InstanceTypeSet +} + +// Copy returns a copy of this ResourceAddress +func (r *ResourceAddress) Copy() *ResourceAddress { + if r == nil { + return nil + } + + n := &ResourceAddress{ + Path: make([]string, 0, len(r.Path)), + Index: r.Index, + InstanceType: r.InstanceType, + Name: r.Name, + Type: r.Type, + Mode: r.Mode, + } + for _, p := range r.Path { + n.Path = append(n.Path, p) + } + return n +} + +// String outputs the address that parses into this address. +func (r *ResourceAddress) String() string { + var result []string + for _, p := range r.Path { + result = append(result, "module", p) + } + + switch r.Mode { + case config.ManagedResourceMode: + // nothing to do + case config.DataResourceMode: + result = append(result, "data") + default: + panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) + } + + if r.Type != "" { + result = append(result, r.Type) + } + + if r.Name != "" { + name := r.Name + if r.InstanceTypeSet { + switch r.InstanceType { + case TypePrimary: + name += ".primary" + case TypeDeposed: + name += ".deposed" + case TypeTainted: + name += ".tainted" + } + } + + if r.Index >= 0 { + name += fmt.Sprintf("[%d]", r.Index) + } + result = append(result, name) + } + + return strings.Join(result, ".") +} + +// stateId returns the ID that this resource should be entered with +// in the state. This is also used for diffs. In the future, we'd like to +// move away from this string field so I don't export this. +func (r *ResourceAddress) stateId() string { + result := fmt.Sprintf("%s.%s", r.Type, r.Name) + switch r.Mode { + case config.ManagedResourceMode: + // Done + case config.DataResourceMode: + result = fmt.Sprintf("data.%s", result) + default: + panic(fmt.Errorf("unknown resource mode: %s", r.Mode)) + } + if r.Index >= 0 { + result += fmt.Sprintf(".%d", r.Index) + } + + return result +} + +// parseResourceAddressConfig creates a resource address from a config.Resource +func parseResourceAddressConfig(r *config.Resource) (*ResourceAddress, error) { + return &ResourceAddress{ + Type: r.Type, + Name: r.Name, + Index: -1, + InstanceType: TypePrimary, + Mode: r.Mode, + }, nil +} + +// parseResourceAddressInternal parses the somewhat bespoke resource +// identifier used in states and diffs, such as "instance.name.0". +func parseResourceAddressInternal(s string) (*ResourceAddress, error) { + // Split based on ".". Every resource address should have at least two + // elements (type and name). 
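+ // (Editor's examples) Accepted forms include "aws_instance.web", + // "aws_instance.web.0", and "data.aws_ami.ubuntu.1".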
+ parts := strings.Split(s, ".") + if len(parts) < 2 || len(parts) > 4 { + return nil, fmt.Errorf("Invalid internal resource address format: %s", s) + } + + // Data resource if we have at least 3 parts and the first one is data + mode := config.ManagedResourceMode + if len(parts) > 2 && parts[0] == "data" { + mode = config.DataResourceMode + parts = parts[1:] + } + + // If we're not a data resource and we have more than 3, then it is an error + if len(parts) > 3 && mode != config.DataResourceMode { + return nil, fmt.Errorf("Invalid internal resource address format: %s", s) + } + + // Build the parts of the resource address that are guaranteed to exist + addr := &ResourceAddress{ + Type: parts[0], + Name: parts[1], + Index: -1, + InstanceType: TypePrimary, + Mode: mode, + } + + // If we have more parts, then we have an index. Parse that. + if len(parts) > 2 { + idx, err := strconv.ParseInt(parts[2], 0, 0) + if err != nil { + return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err) + } + + addr.Index = int(idx) + } + + return addr, nil +} + +func ParseResourceAddress(s string) (*ResourceAddress, error) { + matches, err := tokenizeResourceAddress(s) + if err != nil { + return nil, err + } + mode := config.ManagedResourceMode + if matches["data_prefix"] != "" { + mode = config.DataResourceMode + } + resourceIndex, err := ParseResourceIndex(matches["index"]) + if err != nil { + return nil, err + } + instanceType, err := ParseInstanceType(matches["instance_type"]) + if err != nil { + return nil, err + } + path := ParseResourcePath(matches["path"]) + + // not allowed to say "data." without a type following + if mode == config.DataResourceMode && matches["type"] == "" { + return nil, fmt.Errorf("must target specific data instance") + } + + return &ResourceAddress{ + Path: path, + Index: resourceIndex, + InstanceType: instanceType, + InstanceTypeSet: matches["instance_type"] != "", + Name: matches["name"], + Type: matches["type"], + Mode: mode, + }, nil +} + +func (addr *ResourceAddress) Equals(raw interface{}) bool { + other, ok := raw.(*ResourceAddress) + if !ok { + return false + } + + pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 || + reflect.DeepEqual(addr.Path, other.Path) + + indexMatch := addr.Index == -1 || + other.Index == -1 || + addr.Index == other.Index + + nameMatch := addr.Name == "" || + other.Name == "" || + addr.Name == other.Name + + typeMatch := addr.Type == "" || + other.Type == "" || + addr.Type == other.Type + + // mode is significant only when type is set + modeMatch := addr.Type == "" || + other.Type == "" || + addr.Mode == other.Mode + + return pathMatch && + indexMatch && + addr.InstanceType == other.InstanceType && + nameMatch && + typeMatch && + modeMatch +} + +func ParseResourceIndex(s string) (int, error) { + if s == "" { + return -1, nil + } + return strconv.Atoi(s) +} + +func ParseResourcePath(s string) []string { + if s == "" { + return nil + } + parts := strings.Split(s, ".") + path := make([]string, 0, len(parts)) + for _, s := range parts { + // Due to the limitations of the regexp match below, the path match has + // some noise in it we have to filter out :| + if s == "" || s == "module" { + continue + } + path = append(path, s) + } + return path +} + +func ParseInstanceType(s string) (InstanceType, error) { + switch s { + case "", "primary": + return TypePrimary, nil + case "deposed": + return TypeDeposed, nil + case "tainted": + return TypeTainted, nil + default: + return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType 
field: %q", s) + } +} + +func tokenizeResourceAddress(s string) (map[string]string, error) { + // Example of portions of the regexp below using the + // string "aws_instance.web.tainted[1]" + re := regexp.MustCompile(`\A` + + // "module.foo.module.bar" (optional) + `(?P<path>(?:module\.[^.]+\.?)*)` + + // possibly "data.", if targeting a data resource + `(?P<data_prefix>(?:data\.)?)` + + // "aws_instance.web" (optional when module path specified) + `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` + + // "tainted" (optional, omission implies: "primary") + `(?:\.(?P<instance_type>\w+))?` + + // "1" (optional, omission implies: "0") + `(?:\[(?P<index>\d+)\])?` + + `\z`) + + groupNames := re.SubexpNames() + rawMatches := re.FindAllStringSubmatch(s, -1) + if len(rawMatches) != 1 { + return nil, fmt.Errorf("Problem parsing address: %q", s) + } + + matches := make(map[string]string) + for i, m := range rawMatches[0] { + matches[groupNames[i]] = m + } + + return matches, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go new file mode 100644 index 00000000..1a68c869 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go @@ -0,0 +1,204 @@ +package terraform + +// ResourceProvider is an interface that must be implemented by any +// resource provider: the thing that creates and manages the resources in +// a Terraform configuration. +// +// Important implementation note: All returned pointers, such as +// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to +// shared data. Terraform is highly parallel and assumes that this data is safe +// to read/write in parallel so it must be unique references. Note that it is +// safe to return arguments as results, however. +type ResourceProvider interface { + /********************************************************************* + * Functions related to the provider + *********************************************************************/ + + // Input is called to ask the provider to ask the user for input + // for completing the configuration if necessary. + // + // This may or may not be called, so resource provider writers shouldn't + // rely on this being available to set some default values for validate + // later. An example of a situation where this wouldn't be called is if + // the user is not using a TTY. + Input(UIInput, *ResourceConfig) (*ResourceConfig, error) + + // Validate is called once at the beginning with the raw configuration + // (no interpolation done) and can return a list of warnings and/or + // errors. + // + // This is called once with the provider configuration only. It may not + // be called at all if no provider configuration is given. + // + // This should not assume that any values of the configurations are valid. + // The primary use case of this call is to check that required keys are + // set. + Validate(*ResourceConfig) ([]string, []error) + + // Configure configures the provider itself with the configuration + // given. This is useful for setting things like access keys. + // + // This won't be called at all if no provider configuration is given. + // + // Configure returns an error if one occurred. + Configure(*ResourceConfig) error + + // Resources returns all the available resource types that this provider + // knows how to manage. + Resources() []ResourceType + + // Stop is called when the provider should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for Terraform.
+ // Even if this isn't implemented to do anything (just returns nil), + // Terraform will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. + // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. Terraform core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + /********************************************************************* + * Functions related to individual resources + *********************************************************************/ + + // ValidateResource is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per resource. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + ValidateResource(string, *ResourceConfig) ([]string, []error) + + // Apply applies a diff to a specific resource and returns the new + // resource state along with an error. + // + // If the resource state given has an empty ID, then a new resource + // is expected to be created. + Apply( + *InstanceInfo, + *InstanceState, + *InstanceDiff) (*InstanceState, error) + + // Diff diffs a resource versus a desired state and returns + // a diff. + Diff( + *InstanceInfo, + *InstanceState, + *ResourceConfig) (*InstanceDiff, error) + + // Refresh refreshes a resource and updates all of its attributes + // with the latest information. + Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error) + + /********************************************************************* + * Functions related to importing + *********************************************************************/ + + // ImportState requests that the given resource be imported. + // + // The returned InstanceState only requires ID be set. Importing + // will always call Refresh after the state to complete it. + // + // IMPORTANT: InstanceState doesn't have the resource type attached + // to it. A type must be specified on the state via the Ephemeral + // field on the state. + // + // This function can return multiple states. Normally, an import + // will map 1:1 to a physical resource. However, some resources map + // to multiple. For example, an AWS security group may contain many rules. + // Each rule is represented by a separate resource in Terraform, + // therefore multiple states are returned. + ImportState(*InstanceInfo, string) ([]*InstanceState, error) + + /********************************************************************* + * Functions related to data resources + *********************************************************************/ + + // ValidateDataSource is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. 
+ // + // This is called once per data source instance. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + ValidateDataSource(string, *ResourceConfig) ([]string, []error) + + // DataSources returns all of the available data sources that this + // provider implements. + DataSources() []DataSource + + // ReadDataDiff produces a diff that represents the state that will + // be produced when the given data source is read using a later call + // to ReadDataApply. + ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) + + // ReadDataApply initializes a data instance using the configuration + // in a diff produced by ReadDataDiff. + ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) +} + +// ResourceProviderCloser is an interface that providers that can close +// connections that aren't needed anymore must implement. +type ResourceProviderCloser interface { + Close() error +} + +// ResourceType is a type of resource that a resource provider can manage. +type ResourceType struct { + Name string // Name of the resource, example "instance" (no provider prefix) + Importable bool // Whether this resource supports importing +} + +// DataSource is a data source that a resource provider implements. +type DataSource struct { + Name string +} + +// ResourceProviderFactory is a function type that creates a new instance +// of a resource provider. +type ResourceProviderFactory func() (ResourceProvider, error) + +// ResourceProviderFactoryFixed is a helper that creates a +// ResourceProviderFactory that just returns some fixed provider. +func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory { + return func() (ResourceProvider, error) { + return p, nil + } +} + +func ProviderHasResource(p ResourceProvider, n string) bool { + for _, rt := range p.Resources() { + if rt.Name == n { + return true + } + } + + return false +} + +func ProviderHasDataSource(p ResourceProvider, n string) bool { + for _, rt := range p.DataSources() { + if rt.Name == n { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go new file mode 100644 index 00000000..f5315339 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go @@ -0,0 +1,297 @@ +package terraform + +import "sync" + +// MockResourceProvider implements ResourceProvider but mocks out all the +// calls for testing purposes. +type MockResourceProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. 
+ Meta interface{} + + CloseCalled bool + CloseError error + InputCalled bool + InputInput UIInput + InputConfig *ResourceConfig + InputReturnConfig *ResourceConfig + InputReturnError error + InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error) + ApplyCalled bool + ApplyInfo *InstanceInfo + ApplyState *InstanceState + ApplyDiff *InstanceDiff + ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error) + ApplyReturn *InstanceState + ApplyReturnError error + ConfigureCalled bool + ConfigureConfig *ResourceConfig + ConfigureFn func(*ResourceConfig) error + ConfigureReturnError error + DiffCalled bool + DiffInfo *InstanceInfo + DiffState *InstanceState + DiffDesired *ResourceConfig + DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error) + DiffReturn *InstanceDiff + DiffReturnError error + RefreshCalled bool + RefreshInfo *InstanceInfo + RefreshState *InstanceState + RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error) + RefreshReturn *InstanceState + RefreshReturnError error + ResourcesCalled bool + ResourcesReturn []ResourceType + ReadDataApplyCalled bool + ReadDataApplyInfo *InstanceInfo + ReadDataApplyDiff *InstanceDiff + ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error) + ReadDataApplyReturn *InstanceState + ReadDataApplyReturnError error + ReadDataDiffCalled bool + ReadDataDiffInfo *InstanceInfo + ReadDataDiffDesired *ResourceConfig + ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) + ReadDataDiffReturn *InstanceDiff + ReadDataDiffReturnError error + StopCalled bool + StopFn func() error + StopReturnError error + DataSourcesCalled bool + DataSourcesReturn []DataSource + ValidateCalled bool + ValidateConfig *ResourceConfig + ValidateFn func(*ResourceConfig) ([]string, []error) + ValidateReturnWarns []string + ValidateReturnErrors []error + ValidateResourceFn func(string, *ResourceConfig) ([]string, []error) + ValidateResourceCalled bool + ValidateResourceType string + ValidateResourceConfig *ResourceConfig + ValidateResourceReturnWarns []string + ValidateResourceReturnErrors []error + ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error) + ValidateDataSourceCalled bool + ValidateDataSourceType string + ValidateDataSourceConfig *ResourceConfig + ValidateDataSourceReturnWarns []string + ValidateDataSourceReturnErrors []error + + ImportStateCalled bool + ImportStateInfo *InstanceInfo + ImportStateID string + ImportStateReturn []*InstanceState + ImportStateReturnError error + ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error) +} + +func (p *MockResourceProvider) Close() error { + p.CloseCalled = true + return p.CloseError +} + +func (p *MockResourceProvider) Input( + input UIInput, c *ResourceConfig) (*ResourceConfig, error) { + p.InputCalled = true + p.InputInput = input + p.InputConfig = c + if p.InputFn != nil { + return p.InputFn(input, c) + } + return p.InputReturnConfig, p.InputReturnError +} + +func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateCalled = true + p.ValidateConfig = c + if p.ValidateFn != nil { + return p.ValidateFn(c) + } + return p.ValidateReturnWarns, p.ValidateReturnErrors +} + +func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateResourceCalled = true + p.ValidateResourceType = t + p.ValidateResourceConfig = c + + if p.ValidateResourceFn != nil 
{ + return p.ValidateResourceFn(t, c) + } + + return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors +} + +func (p *MockResourceProvider) Configure(c *ResourceConfig) error { + p.Lock() + defer p.Unlock() + + p.ConfigureCalled = true + p.ConfigureConfig = c + + if p.ConfigureFn != nil { + return p.ConfigureFn(c) + } + + return p.ConfigureReturnError +} + +func (p *MockResourceProvider) Stop() error { + p.Lock() + defer p.Unlock() + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopReturnError +} + +func (p *MockResourceProvider) Apply( + info *InstanceInfo, + state *InstanceState, + diff *InstanceDiff) (*InstanceState, error) { + // We only lock while writing data. Reading is fine + p.Lock() + p.ApplyCalled = true + p.ApplyInfo = info + p.ApplyState = state + p.ApplyDiff = diff + p.Unlock() + + if p.ApplyFn != nil { + return p.ApplyFn(info, state, diff) + } + + return p.ApplyReturn.DeepCopy(), p.ApplyReturnError +} + +func (p *MockResourceProvider) Diff( + info *InstanceInfo, + state *InstanceState, + desired *ResourceConfig) (*InstanceDiff, error) { + p.Lock() + defer p.Unlock() + + p.DiffCalled = true + p.DiffInfo = info + p.DiffState = state + p.DiffDesired = desired + if p.DiffFn != nil { + return p.DiffFn(info, state, desired) + } + + return p.DiffReturn.DeepCopy(), p.DiffReturnError +} + +func (p *MockResourceProvider) Refresh( + info *InstanceInfo, + s *InstanceState) (*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.RefreshCalled = true + p.RefreshInfo = info + p.RefreshState = s + + if p.RefreshFn != nil { + return p.RefreshFn(info, s) + } + + return p.RefreshReturn.DeepCopy(), p.RefreshReturnError +} + +func (p *MockResourceProvider) Resources() []ResourceType { + p.Lock() + defer p.Unlock() + + p.ResourcesCalled = true + return p.ResourcesReturn +} + +func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.ImportStateCalled = true + p.ImportStateInfo = info + p.ImportStateID = id + if p.ImportStateFn != nil { + return p.ImportStateFn(info, id) + } + + var result []*InstanceState + if p.ImportStateReturn != nil { + result = make([]*InstanceState, len(p.ImportStateReturn)) + for i, v := range p.ImportStateReturn { + result[i] = v.DeepCopy() + } + } + + return result, p.ImportStateReturnError +} + +func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateDataSourceCalled = true + p.ValidateDataSourceType = t + p.ValidateDataSourceConfig = c + + if p.ValidateDataSourceFn != nil { + return p.ValidateDataSourceFn(t, c) + } + + return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors +} + +func (p *MockResourceProvider) ReadDataDiff( + info *InstanceInfo, + desired *ResourceConfig) (*InstanceDiff, error) { + p.Lock() + defer p.Unlock() + + p.ReadDataDiffCalled = true + p.ReadDataDiffInfo = info + p.ReadDataDiffDesired = desired + if p.ReadDataDiffFn != nil { + return p.ReadDataDiffFn(info, desired) + } + + return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError +} + +func (p *MockResourceProvider) ReadDataApply( + info *InstanceInfo, + d *InstanceDiff) (*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.ReadDataApplyCalled = true + p.ReadDataApplyInfo = info + p.ReadDataApplyDiff = d + + if p.ReadDataApplyFn != nil { + return p.ReadDataApplyFn(info, d) + } + + return p.ReadDataApplyReturn.DeepCopy(), 
p.ReadDataApplyReturnError +} + +func (p *MockResourceProvider) DataSources() []DataSource { + p.Lock() + defer p.Unlock() + + p.DataSourcesCalled = true + return p.DataSourcesReturn +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go new file mode 100644 index 00000000..361ec1ec --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go @@ -0,0 +1,54 @@ +package terraform + +// ResourceProvisioner is an interface that must be implemented by any +// resource provisioner: the thing that initializes resources in +// a Terraform configuration. +type ResourceProvisioner interface { + // Validate is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per resource. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + Validate(*ResourceConfig) ([]string, []error) + + // Apply runs the provisioner on a specific resource and returns the new + // resource state along with an error. Instead of a diff, the ResourceConfig + // is provided since provisioners only run after a resource has been + // newly created. + Apply(UIOutput, *InstanceState, *ResourceConfig) error + + // Stop is called when the provisioner should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for Terraform. + // Even if this isn't implemented to do anything (just returns nil), + // Terraform will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. + // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. Terraform core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error +} + +// ResourceProvisionerCloser is an interface that provisioners that can close +// connections that aren't needed anymore must implement. +type ResourceProvisionerCloser interface { + Close() error +} + +// ResourceProvisionerFactory is a function type that creates a new instance +// of a resource provisioner. +type ResourceProvisionerFactory func() (ResourceProvisioner, error) diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go new file mode 100644 index 00000000..f471a518 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go @@ -0,0 +1,72 @@ +package terraform + +import "sync" + +// MockResourceProvisioner implements ResourceProvisioner but mocks out all the +// calls for testing purposes. 
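+// +// Typical test usage (editor's sketch; the assertions are hypothetical): +// +// p := new(MockResourceProvisioner) +// p.ApplyReturnError = errors.New("boom") +// // ... exercise the code under test with p ... +// if !p.ApplyCalled { +// t.Fatal("expected Apply to be called") +// }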
+type MockResourceProvisioner struct { + sync.Mutex + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + ApplyCalled bool + ApplyOutput UIOutput + ApplyState *InstanceState + ApplyConfig *ResourceConfig + ApplyFn func(*InstanceState, *ResourceConfig) error + ApplyReturnError error + + ValidateCalled bool + ValidateConfig *ResourceConfig + ValidateFn func(c *ResourceConfig) ([]string, []error) + ValidateReturnWarns []string + ValidateReturnErrors []error + + StopCalled bool + StopFn func() error + StopReturnError error +} + +func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateCalled = true + p.ValidateConfig = c + if p.ValidateFn != nil { + return p.ValidateFn(c) + } + return p.ValidateReturnWarns, p.ValidateReturnErrors +} + +func (p *MockResourceProvisioner) Apply( + output UIOutput, + state *InstanceState, + c *ResourceConfig) error { + p.Lock() + + p.ApplyCalled = true + p.ApplyOutput = output + p.ApplyState = state + p.ApplyConfig = c + if p.ApplyFn != nil { + fn := p.ApplyFn + p.Unlock() + return fn(state, c) + } + + defer p.Unlock() + return p.ApplyReturnError +} + +func (p *MockResourceProvisioner) Stop() error { + p.Lock() + defer p.Unlock() + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopReturnError +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/semantics.go b/vendor/github.com/hashicorp/terraform/terraform/semantics.go new file mode 100644 index 00000000..20f1d8a2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/semantics.go @@ -0,0 +1,132 @@ +package terraform + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/dag" +) + +// GraphSemanticChecker is the interface that semantic checks across +// the entire Terraform graph implement. +// +// The graph should NOT be modified by the semantic checker. +type GraphSemanticChecker interface { + Check(*dag.Graph) error +} + +// UnorderedSemanticCheckRunner is an implementation of GraphSemanticChecker +// that runs a list of SemanticCheckers against the vertices of the graph +// in no specified order. +type UnorderedSemanticCheckRunner struct { + Checks []SemanticChecker +} + +func (sc *UnorderedSemanticCheckRunner) Check(g *dag.Graph) error { + var err error + for _, v := range g.Vertices() { + for _, check := range sc.Checks { + if e := check.Check(g, v); e != nil { + err = multierror.Append(err, e) + } + } + } + + return err +} + +// SemanticChecker is the interface that semantic checks across the +// Terraform graph implement. Errors are accumulated. Even after an error +// is returned, child vertices in the graph will still be visited. +// +// The graph should NOT be modified by the semantic checker. +// +// The order in which vertices are visited is left unspecified, so the +// semantic checks should not rely on that. +type SemanticChecker interface { + Check(*dag.Graph, dag.Vertex) error +} + +// smcUserVariables does all the semantic checks to verify that the +// variables given satisfy the configuration itself. 
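+// +// For example (editor's illustration): a configuration that declares a +// required variable "region", checked against an empty variable map, yields +// the error "Required variable not set: region", while passing a number for +// a string-typed variable yields a type mismatch error below.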
+func smcUserVariables(c *config.Config, vs map[string]interface{}) []error { + var errs []error + + cvs := make(map[string]*config.Variable) + for _, v := range c.Variables { + cvs[v.Name] = v + } + + // Check that all required variables are present + required := make(map[string]struct{}) + for _, v := range c.Variables { + if v.Required() { + required[v.Name] = struct{}{} + } + } + for k, _ := range vs { + delete(required, k) + } + if len(required) > 0 { + for k, _ := range required { + errs = append(errs, fmt.Errorf( + "Required variable not set: %s", k)) + } + } + + // Check that types match up + for name, proposedValue := range vs { + // Check for "map.key" fields. These stopped working with Terraform + // 0.7 but we do this to surface a better error message informing + // the user what happened. + if idx := strings.Index(name, "."); idx > 0 { + key := name[:idx] + if _, ok := cvs[key]; ok { + errs = append(errs, fmt.Errorf( + "%s: Overriding map keys with the format `name.key` is no "+ + "longer allowed. You may still override keys by setting "+ + "`name = { key = value }`. The maps will be merged. This "+ + "behavior appeared in 0.7.0.", name)) + continue + } + } + + schema, ok := cvs[name] + if !ok { + continue + } + + declaredType := schema.Type() + + switch declaredType { + case config.VariableTypeString: + switch proposedValue.(type) { + case string: + continue + } + case config.VariableTypeMap: + switch v := proposedValue.(type) { + case map[string]interface{}: + continue + case []map[string]interface{}: + // if we have a list of 1 map, it will get coerced later as needed + if len(v) == 1 { + continue + } + } + case config.VariableTypeList: + switch proposedValue.(type) { + case []interface{}: + continue + } + } + errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s", + name, declaredType.Printable(), hclTypeName(proposedValue))) + } + + // TODO(mitchellh): variables that are unknown + + return errs +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go new file mode 100644 index 00000000..074b6824 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/state.go @@ -0,0 +1,2118 @@ +package terraform + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "reflect" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/config" + "github.com/mitchellh/copystructure" + "github.com/satori/go.uuid" +) + +const ( + // StateVersion is the current version for our state file + StateVersion = 3 +) + +// rootModulePath is the path of the root module +var rootModulePath = []string{"root"} + +// normalizeModulePath takes a raw module path and returns a path that +// has the rootModulePath prepended to it. If I could go back in time I +// would've never had a rootModulePath (empty path would be root). We can +// still fix this but that's a big refactor that my branch doesn't make sense +// for. Instead, this function normalizes paths. +func normalizeModulePath(p []string) []string { + k := len(rootModulePath) + + // If we already have a root module prefix, we're done + if len(p) >= len(rootModulePath) { + if reflect.DeepEqual(p[:k], rootModulePath) { + return p + } + } + + // None?
Prefix it + result := make([]string, len(rootModulePath)+len(p)) + copy(result, rootModulePath) + copy(result[k:], p) + return result +} + +// State keeps track of a snapshot state-of-the-world that Terraform +// can use to keep track of what real world resources it is actually +// managing. +type State struct { + // Version is the state file protocol version. + Version int `json:"version"` + + // TFVersion is the version of Terraform that wrote this state. + TFVersion string `json:"terraform_version,omitempty"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Lineage is set when a new, blank state is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string `json:"lineage"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *RemoteState `json:"remote,omitempty"` + + // Backend tracks the configuration for the backend in use with + // this state. This is used to track any changes in the backend + // configuration. + Backend *BackendState `json:"backend,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*ModuleState `json:"modules"` + + mu sync.Mutex +} + +func (s *State) Lock() { s.mu.Lock() } +func (s *State) Unlock() { s.mu.Unlock() } + +// NewState is used to initialize a blank state +func NewState() *State { + s := &State{} + s.init() + return s +} + +// Children returns the ModuleStates that are direct children of +// the given path. If the path is "root", for example, then children +// returned might be "root.child", but not "root.child.grandchild". +func (s *State) Children(path []string) []*ModuleState { + s.Lock() + defer s.Unlock() + // TODO: test + + return s.children(path) +} + +func (s *State) children(path []string) []*ModuleState { + result := make([]*ModuleState, 0) + for _, m := range s.Modules { + if m == nil { + continue + } + + if len(m.Path) != len(path)+1 { + continue + } + if !reflect.DeepEqual(path, m.Path[:len(path)]) { + continue + } + + result = append(result, m) + } + + return result +} + +// AddModule adds the module with the given path to the state. +// +// This should be the preferred method to add module states since it +// allows us to optimize lookups later as well as control sorting. +func (s *State) AddModule(path []string) *ModuleState { + s.Lock() + defer s.Unlock() + + return s.addModule(path) +} + +func (s *State) addModule(path []string) *ModuleState { + // check if the module exists first + m := s.moduleByPath(path) + if m != nil { + return m + } + + m = &ModuleState{Path: path} + m.init() + s.Modules = append(s.Modules, m) + s.sort() + return m +} + +// ModuleByPath is used to lookup the module state for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. 
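A sketch of how the module helpers compose, assuming the exported API shown here (module paths are always rooted at "root"; the function name is illustrative):

func exampleModuleTree() {
	s := NewState() // initializes the root module and assigns a lineage

	// AddModule is idempotent: adding an existing path returns that module.
	child := s.AddModule([]string{"root", "networking"})
	_ = s.AddModule([]string{"root", "networking", "subnets"})

	// Children returns only direct children: asking under "root" yields
	// "root.networking" but not "root.networking.subnets".
	direct := s.Children([]string{"root"})
	fmt.Printf("%d direct child(ren); first at %v\n", len(direct), child.Path)
}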
+func (s *State) ModuleByPath(path []string) *ModuleState { + if s == nil { + return nil + } + s.Lock() + defer s.Unlock() + + return s.moduleByPath(path) +} + +func (s *State) moduleByPath(path []string) *ModuleState { + for _, mod := range s.Modules { + if mod == nil { + continue + } + if mod.Path == nil { + panic("missing module path") + } + if reflect.DeepEqual(mod.Path, path) { + return mod + } + } + return nil +} + +// ModuleOrphans returns all the module orphans in this state by +// returning their full paths. These paths can be used with ModuleByPath +// to return the actual state. +func (s *State) ModuleOrphans(path []string, c *config.Config) [][]string { + s.Lock() + defer s.Unlock() + + return s.moduleOrphans(path, c) + +} + +func (s *State) moduleOrphans(path []string, c *config.Config) [][]string { + // direct keeps track of what direct children we have both in our config + // and in our state. childrenKeys keeps track of what isn't an orphan. + direct := make(map[string]struct{}) + childrenKeys := make(map[string]struct{}) + if c != nil { + for _, m := range c.Modules { + childrenKeys[m.Name] = struct{}{} + direct[m.Name] = struct{}{} + } + } + + // Go over the direct children and find any that aren't in our keys. + var orphans [][]string + for _, m := range s.children(path) { + key := m.Path[len(m.Path)-1] + + // Record that we found this key as a direct child. We use this + // later to find orphan nested modules. + direct[key] = struct{}{} + + // If we have a direct child still in our config, it is not an orphan + if _, ok := childrenKeys[key]; ok { + continue + } + + orphans = append(orphans, m.Path) + } + + // Find the orphans that are nested... + for _, m := range s.Modules { + if m == nil { + continue + } + + // We only want modules that are at least grandchildren + if len(m.Path) < len(path)+2 { + continue + } + + // If it isn't part of our tree, continue + if !reflect.DeepEqual(path, m.Path[:len(path)]) { + continue + } + + // If we have the direct child, then just skip it. + key := m.Path[len(path)] + if _, ok := direct[key]; ok { + continue + } + + orphanPath := m.Path[:len(path)+1] + + // Don't double-add if we've already added this orphan (which can happen if + // there are multiple nested sub-modules that get orphaned together). + alreadyAdded := false + for _, o := range orphans { + if reflect.DeepEqual(o, orphanPath) { + alreadyAdded = true + break + } + } + if alreadyAdded { + continue + } + + // Add this orphan + orphans = append(orphans, orphanPath) + } + + return orphans +} + +// Empty returns true if the state is empty. +func (s *State) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return len(s.Modules) == 0 +} + +// HasResources returns true if the state contains any resources. +// +// This is similar to !s.Empty, but returns true also in the case where the +// state has modules but all of them are devoid of resources. +func (s *State) HasResources() bool { + if s.Empty() { + return false + } + + for _, mod := range s.Modules { + if len(mod.Resources) > 0 { + return true + } + } + + return false +} + +// IsRemote returns true if State represents a state that exists and is +// remote. +func (s *State) IsRemote() bool { + if s == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Remote == nil { + return false + } + if s.Remote.Type == "" { + return false + } + + return true +} + +// Validate validates the integrity of this state file. 
+// +// Certain properties of the statefile are expected by Terraform in order +// to behave properly. The core of Terraform will assume that once it +// receives a State structure that it has been validated. This validation +// check should be called to ensure that. +// +// If this returns an error, then the user should be notified. The error +// response will include detailed information on the nature of the error. +func (s *State) Validate() error { + s.Lock() + defer s.Unlock() + + var result error + + // !!!! FOR DEVELOPERS !!!! + // + // Any errors returned from this Validate function will BLOCK TERRAFORM + // from loading a state file. Therefore, this should only contain checks + // that are only resolvable through manual intervention. + // + // !!!! FOR DEVELOPERS !!!! + + // Make sure there are no duplicate module states. We open a new + // block here so we can use basic variable names and future validations + // can do the same. + { + found := make(map[string]struct{}) + for _, ms := range s.Modules { + if ms == nil { + continue + } + + key := strings.Join(ms.Path, ".") + if _, ok := found[key]; ok { + result = multierror.Append(result, fmt.Errorf( + strings.TrimSpace(stateValidateErrMultiModule), key)) + continue + } + + found[key] = struct{}{} + } + } + + return result +} + +// Remove removes the item in the state at the given address, returning +// any errors that may have occurred. +// +// If the address references a module state or resource, it will delete +// all children as well. To check what will be deleted, use a StateFilter +// first. +func (s *State) Remove(addr ...string) error { + s.Lock() + defer s.Unlock() + + // Filter out what we need to delete + filter := &StateFilter{State: s} + results, err := filter.Filter(addr...) + if err != nil { + return err + } + + // If we have no results, just exit early, we're not going to do anything. + // While what happens below is fairly fast, this is an important early + // exit since the prune below might modify the state more and we don't + // want to modify the state if we don't have to. + if len(results) == 0 { + return nil + } + + // Go through each result and grab what we need + removed := make(map[interface{}]struct{}) + for _, r := range results { + // Convert the path to our own type + path := append([]string{"root"}, r.Path...) + + // If we removed this already, then ignore + if _, ok := removed[r.Value]; ok { + continue + } + + // If we removed the parent already, then ignore + if r.Parent != nil { + if _, ok := removed[r.Parent.Value]; ok { + continue + } + } + + // Add this to the removed list + removed[r.Value] = struct{}{} + + switch v := r.Value.(type) { + case *ModuleState: + s.removeModule(path, v) + case *ResourceState: + s.removeResource(path, v) + case *InstanceState: + s.removeInstance(path, r.Parent.Value.(*ResourceState), v) + default: + return fmt.Errorf("unknown type to delete: %T", r.Value) + } + } + + // Prune since the removal functions often do the bare minimum to + // remove a thing and may leave around dangling empty modules, resources, + // etc. Prune will clean that all up. + s.prune() + + return nil +} + +func (s *State) removeModule(path []string, v *ModuleState) { + for i, m := range s.Modules { + if m == v { + s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil + return + } + } +} + +func (s *State) removeResource(path []string, v *ResourceState) { + // Get the module this resource lives in. If it doesn't exist, we're done. 
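A sketch of Remove in use, assuming the resource-address syntax accepted by StateFilter; the addresses are invented for illustration:

func exampleRemove(s *State) error {
	// Removing a module address deletes the module and everything under
	// it; the prune() call at the end cleans up dangling empty containers.
	if err := s.Remove("module.networking"); err != nil {
		return err
	}

	// Removing a single resource address leaves its siblings intact.
	return s.Remove("aws_instance.web")
}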
+ mod := s.moduleByPath(path) + if mod == nil { + return + } + + // Find this resource. This is an O(N) lookup; if we had the key + // it could be O(1), but even with thousands of resources this shouldn't + // matter right now. We can easily improve performance here when the time comes. + for k, r := range mod.Resources { + if r == v { + // Found it + delete(mod.Resources, k) + return + } + } +} + +func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { + // Go through the resource and find the instance that matches this + // (if any) and remove it. + + // Check primary + if r.Primary == v { + r.Primary = nil + return + } + + // Check lists + lists := [][]*InstanceState{r.Deposed} + for _, is := range lists { + for i, instance := range is { + if instance == v { + // Found it, remove it + is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil + + // Done + return + } + } + } +} + +// RootModule returns the ModuleState for the root module +func (s *State) RootModule() *ModuleState { + root := s.ModuleByPath(rootModulePath) + if root == nil { + panic("missing root module") + } + return root +} + +// Equal tests if one state is equal to another. +func (s *State) Equal(other *State) bool { + // If one is nil, we do a direct check + if s == nil || other == nil { + return s == other + } + + s.Lock() + defer s.Unlock() + return s.equal(other) +} + +func (s *State) equal(other *State) bool { + if s == nil || other == nil { + return s == other + } + + // If the versions are different, they're certainly not equal + if s.Version != other.Version { + return false + } + + // If any of the modules are not equal, then this state isn't equal + if len(s.Modules) != len(other.Modules) { + return false + } + for _, m := range s.Modules { + // This isn't very optimal currently but works. + otherM := other.moduleByPath(m.Path) + if otherM == nil { + return false + } + + // If they're not equal, then we're not equal! + if !m.Equal(otherM) { + return false + } + } + + return true +} + +type StateAgeComparison int + +const ( + StateAgeEqual StateAgeComparison = 0 + StateAgeReceiverNewer StateAgeComparison = 1 + StateAgeReceiverOlder StateAgeComparison = -1 +) + +// CompareAges compares one state with another for which is "older". +// +// This is a simple check using the state's serial, and is thus only as +// reliable as the serial itself. In the normal case, only one state +// exists for a given combination of lineage/serial, but Terraform +// does not guarantee this and so the result of this method should be +// used with care. +// +// Returns an integer that is negative if the receiver is older than +// the argument, positive if the converse, and zero if they are equal. +// An error is returned if the two states are not of the same lineage, +// in which case the integer returned has no meaning.
+func (s *State) CompareAges(other *State) (StateAgeComparison, error) { + // nil states are "older" than actual states + switch { + case s != nil && other == nil: + return StateAgeReceiverNewer, nil + case s == nil && other != nil: + return StateAgeReceiverOlder, nil + case s == nil && other == nil: + return StateAgeEqual, nil + } + + if !s.SameLineage(other) { + return StateAgeEqual, fmt.Errorf( + "can't compare two states of differing lineage", + ) + } + + s.Lock() + defer s.Unlock() + + switch { + case s.Serial < other.Serial: + return StateAgeReceiverOlder, nil + case s.Serial > other.Serial: + return StateAgeReceiverNewer, nil + default: + return StateAgeEqual, nil + } +} + +// SameLineage returns true only if the state given as an argument belongs +// to the same "lineage" of states as the receiver. +func (s *State) SameLineage(other *State) bool { + s.Lock() + defer s.Unlock() + + // If one of the states has no lineage then it is assumed to predate + // this concept, and so we'll accept it as belonging to any lineage + // so that a lineage string can be assigned to newer versions + // without breaking compatibility with older versions. + if s.Lineage == "" || other.Lineage == "" { + return true + } + + return s.Lineage == other.Lineage +} + +// DeepCopy performs a deep copy of the state structure and returns +// a new structure. +func (s *State) DeepCopy() *State { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*State) +} + +// IncrementSerialMaybe increments the serial number of this state +// if it is different from the other state. +func (s *State) IncrementSerialMaybe(other *State) { + if s == nil { + return + } + if other == nil { + return + } + s.Lock() + defer s.Unlock() + + if s.Serial > other.Serial { + return + } + if other.TFVersion != s.TFVersion || !s.equal(other) { + if other.Serial > s.Serial { + s.Serial = other.Serial + } + + s.Serial++ + } +} + +// FromFutureTerraform checks if this state was written by a Terraform +// version from the future.
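A sketch of the intended use of CompareAges and SameLineage when reconciling two copies of a state, for example a local and a remote snapshot (the function name is illustrative):

func exampleReconcile(local, remote *State) (*State, error) {
	// CompareAges is only meaningful within a single lineage; for states
	// of differing lineage it returns an error alongside StateAgeEqual.
	cmp, err := local.CompareAges(remote)
	if err != nil {
		return nil, err
	}

	// Prefer whichever serial is newer; equal ages need no reconciliation.
	if cmp == StateAgeReceiverOlder {
		return remote, nil
	}
	return local, nil
}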
+func (s *State) FromFutureTerraform() bool { + s.Lock() + defer s.Unlock() + + // No TF version means it is certainly from the past + if s.TFVersion == "" { + return false + } + + v := version.Must(version.NewVersion(s.TFVersion)) + return SemVersion.LessThan(v) +} + +func (s *State) Init() { + s.Lock() + defer s.Unlock() + s.init() +} + +func (s *State) init() { + if s.Version == 0 { + s.Version = StateVersion + } + if s.moduleByPath(rootModulePath) == nil { + s.addModule(rootModulePath) + } + s.ensureHasLineage() + + for _, mod := range s.Modules { + if mod != nil { + mod.init() + } + } + + if s.Remote != nil { + s.Remote.init() + } + +} + +func (s *State) EnsureHasLineage() { + s.Lock() + defer s.Unlock() + + s.ensureHasLineage() +} + +func (s *State) ensureHasLineage() { + if s.Lineage == "" { + s.Lineage = uuid.NewV4().String() + log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) + } else { + log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) + } +} + +// AddModuleState inserts this module state and overrides any existing ModuleState +func (s *State) AddModuleState(mod *ModuleState) { + mod.init() + s.Lock() + defer s.Unlock() + + s.addModuleState(mod) +} + +func (s *State) addModuleState(mod *ModuleState) { + for i, m := range s.Modules { + if reflect.DeepEqual(m.Path, mod.Path) { + s.Modules[i] = mod + return + } + } + + s.Modules = append(s.Modules, mod) + s.sort() +} + +// prune is used to remove any resources that are no longer required +func (s *State) prune() { + if s == nil { + return + } + + // Filter out empty modules. + // A module is always assumed to have a path, and its length isn't always + // bounds checked later on. Modules may be "emptied" during destroy, but we + // never want to store those in the state. + for i := 0; i < len(s.Modules); i++ { + if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 { + s.Modules = append(s.Modules[:i], s.Modules[i+1:]...) + i-- + } + } + + for _, mod := range s.Modules { + mod.prune() + } + if s.Remote != nil && s.Remote.Empty() { + s.Remote = nil + } +} + +// sort sorts the modules +func (s *State) sort() { + sort.Sort(moduleStateSort(s.Modules)) + + // Allow modules to be sorted + for _, m := range s.Modules { + if m != nil { + m.sort() + } + } +} + +func (s *State) String() string { + if s == nil { + return "" + } + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + for _, m := range s.Modules { + mStr := m.String() + + // If we're the root module, we just write the output directly. + if reflect.DeepEqual(m.Path, rootModulePath) { + buf.WriteString(mStr + "\n") + continue + } + + buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], "."))) + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + text := s.Text() + if text != "" { + text = " " + text + } + + buf.WriteString(fmt.Sprintf("%s\n", text)) + } + } + + return strings.TrimSpace(buf.String()) +} + +// BackendState stores the configuration to connect to a remote backend. +type BackendState struct { + Type string `json:"type"` // Backend type + Config map[string]interface{} `json:"config"` // Backend raw config + + // Hash is the hash code to uniquely identify the original source + // configuration. We use this to detect when there is a change in + // configuration even when "type" isn't changed. + Hash uint64 `json:"hash"` +} + +// Empty returns true if BackendState has no state.
+func (s *BackendState) Empty() bool { + return s == nil || s.Type == "" +} + +// Rehash returns a unique content hash for this backend's configuration +// as a uint64 value. +// The Hash stored in the backend state needs to match the config itself, but +// we need to compare the backend config after it has been combined with all +// options. +// This function must match the implementation used by config.Backend. +func (s *BackendState) Rehash() uint64 { + if s == nil { + return 0 + } + + cfg := config.Backend{ + Type: s.Type, + RawConfig: &config.RawConfig{ + Raw: s.Config, + }, + } + + return cfg.Rehash() +} + +// RemoteState is used to track the information about a remote +// state store that we push/pull state to. +type RemoteState struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` + + mu sync.Mutex +} + +func (s *RemoteState) Lock() { s.mu.Lock() } +func (s *RemoteState) Unlock() { s.mu.Unlock() } + +func (r *RemoteState) init() { + r.Lock() + defer r.Unlock() + + if r.Config == nil { + r.Config = make(map[string]string) + } +} + +func (r *RemoteState) deepcopy() *RemoteState { + r.Lock() + defer r.Unlock() + + confCopy := make(map[string]string, len(r.Config)) + for k, v := range r.Config { + confCopy[k] = v + } + return &RemoteState{ + Type: r.Type, + Config: confCopy, + } +} + +func (r *RemoteState) Empty() bool { + if r == nil { + return true + } + r.Lock() + defer r.Unlock() + + return r.Type == "" +} + +func (r *RemoteState) Equals(other *RemoteState) bool { + r.Lock() + defer r.Unlock() + + if r.Type != other.Type { + return false + } + if len(r.Config) != len(other.Config) { + return false + } + for k, v := range r.Config { + if other.Config[k] != v { + return false + } + } + return true +} + +// OutputState is used to track the state relevant to a single output. +type OutputState struct { + // Sensitive describes whether the output is considered sensitive, + // which may lead to masking the value on screen in some cases. + Sensitive bool `json:"sensitive"` + // Type describes the structure of Value. Valid values are "string", + // "map" and "list" + Type string `json:"type"` + // Value contains the value of the output, in the structure described + // by the Type field. + Value interface{} `json:"value"` + + mu sync.Mutex +} + +func (s *OutputState) Lock() { s.mu.Lock() } +func (s *OutputState) Unlock() { s.mu.Unlock() } + +func (s *OutputState) String() string { + return fmt.Sprintf("%#v", s.Value) +} + +// Equal compares two OutputState structures for equality. nil values are +// considered equal. +func (s *OutputState) Equal(other *OutputState) bool { + if s == nil && other == nil { + return true + } + + if s == nil || other == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Sensitive != other.Sensitive { + return false + } + + if !reflect.DeepEqual(s.Value, other.Value) { + return false + } + + return true +} + +func (s *OutputState) deepcopy() *OutputState { + if s == nil { + return nil + } + + stateCopy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(fmt.Errorf("Error copying output value: %s", err)) + } + + return stateCopy.(*OutputState) +} + +// ModuleState is used to track all the state relevant to a single +// module. Previous to Terraform 0.3, all state belonged to the "root" +// module. 
+type ModuleState struct { + // Path is the import path from the root module. Module imports are + // always disjoint, so the path represents a module tree + Path []string `json:"path"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]*OutputState `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. + Resources map[string]*ResourceState `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: a module may depend + // on a VPC ID given by an aws_vpc resource. + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + mu sync.Mutex +} + +func (s *ModuleState) Lock() { s.mu.Lock() } +func (s *ModuleState) Unlock() { s.mu.Unlock() } + +// Equal tests whether one module state is equal to another. +func (m *ModuleState) Equal(other *ModuleState) bool { + m.Lock() + defer m.Unlock() + + // Paths must be equal + if !reflect.DeepEqual(m.Path, other.Path) { + return false + } + + // Outputs must be equal + if len(m.Outputs) != len(other.Outputs) { + return false + } + for k, v := range m.Outputs { + if !other.Outputs[k].Equal(v) { + return false + } + } + + // Dependencies must be equal. This sorts these in place but + // this shouldn't cause any problems. + sort.Strings(m.Dependencies) + sort.Strings(other.Dependencies) + if len(m.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range m.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // Resources must be equal + if len(m.Resources) != len(other.Resources) { + return false + } + for k, r := range m.Resources { + otherR, ok := other.Resources[k] + if !ok { + return false + } + + if !r.Equal(otherR) { + return false + } + } + + return true +} + +// IsRoot says whether or not this module diff is for the root module. +func (m *ModuleState) IsRoot() bool { + m.Lock() + defer m.Unlock() + return reflect.DeepEqual(m.Path, rootModulePath) +} + +// IsDescendent returns true if other is a descendent of this module. +func (m *ModuleState) IsDescendent(other *ModuleState) bool { + m.Lock() + defer m.Unlock() + + i := len(m.Path) + return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path) +} + +// Orphans returns a list of keys of resources that are in the State +// but aren't present in the configuration itself. Hence, these keys +// represent the state of resources that are orphans.
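Orphan detection works purely on resource-key prefixes, so it can be sketched with a hand-built module state; with a nil config there is nothing to match against and every key is an orphan (names invented for illustration):

func exampleOrphans() {
	m := &ModuleState{
		Resources: map[string]*ResourceState{
			"aws_instance.web":   {Type: "aws_instance"},
			"aws_instance.web.0": {Type: "aws_instance"},
		},
	}
	m.init()

	// Both keys come back; ordering follows map iteration and may vary.
	fmt.Println(m.Orphans(nil))
}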
+func (m *ModuleState) Orphans(c *config.Config) []string { + m.Lock() + defer m.Unlock() + + keys := make(map[string]struct{}) + for k, _ := range m.Resources { + keys[k] = struct{}{} + } + + if c != nil { + for _, r := range c.Resources { + delete(keys, r.Id()) + + for k, _ := range keys { + if strings.HasPrefix(k, r.Id()+".") { + delete(keys, k) + } + } + } + } + + result := make([]string, 0, len(keys)) + for k, _ := range keys { + result = append(result, k) + } + + return result +} + +// View returns a view with the given resource prefix. +func (m *ModuleState) View(id string) *ModuleState { + if m == nil { + return m + } + + r := m.deepcopy() + for k, _ := range r.Resources { + if id == k || strings.HasPrefix(k, id+".") { + continue + } + + delete(r.Resources, k) + } + + return r +} + +func (m *ModuleState) init() { + m.Lock() + defer m.Unlock() + + if m.Path == nil { + m.Path = []string{} + } + if m.Outputs == nil { + m.Outputs = make(map[string]*OutputState) + } + if m.Resources == nil { + m.Resources = make(map[string]*ResourceState) + } + + if m.Dependencies == nil { + m.Dependencies = make([]string, 0) + } + + for _, rs := range m.Resources { + rs.init() + } +} + +func (m *ModuleState) deepcopy() *ModuleState { + if m == nil { + return nil + } + + stateCopy, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + + return stateCopy.(*ModuleState) +} + +// prune is used to remove any resources that are no longer required +func (m *ModuleState) prune() { + m.Lock() + defer m.Unlock() + + for k, v := range m.Resources { + if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { + delete(m.Resources, k) + continue + } + + v.prune() + } + + for k, v := range m.Outputs { + if v.Value == config.UnknownVariableValue { + delete(m.Outputs, k) + } + } + + m.Dependencies = uniqueStrings(m.Dependencies) +} + +func (m *ModuleState) sort() { + for _, v := range m.Resources { + v.sort() + } +} + +func (m *ModuleState) String() string { + m.Lock() + defer m.Unlock() + + var buf bytes.Buffer + + if len(m.Resources) == 0 { + buf.WriteString("<no state>") + } + + names := make([]string, 0, len(m.Resources)) + for name, _ := range m.Resources { + names = append(names, name) + } + + sort.Sort(resourceNameSort(names)) + + for _, k := range names { + rs := m.Resources[k] + var id string + if rs.Primary != nil { + id = rs.Primary.ID + } + if id == "" { + id = "<not created>" + } + + taintStr := "" + if rs.Primary.Tainted { + taintStr = " (tainted)" + } + + deposedStr := "" + if len(rs.Deposed) > 0 { + deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed)) + } + + buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) + buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) + if rs.Provider != "" { + buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider)) + } + + var attributes map[string]string + if rs.Primary != nil { + attributes = rs.Primary.Attributes + } + attrKeys := make([]string, 0, len(attributes)) + for ak, _ := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) + } + + for idx, t := range rs.Deposed { + taintStr := "" + if t.Tainted { + taintStr = " (tainted)" + } + buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr)) + } + + if len(rs.Dependencies) > 0 { + buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) + for _, dep := range rs.Dependencies { +
buf.WriteString(fmt.Sprintf(" %s\n", dep)) + } + } + } + + if len(m.Outputs) > 0 { + buf.WriteString("\nOutputs:\n\n") + + ks := make([]string, 0, len(m.Outputs)) + for k, _ := range m.Outputs { + ks = append(ks, k) + } + + sort.Strings(ks) + + for _, k := range ks { + v := m.Outputs[k] + switch vTyped := v.Value.(type) { + case string: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case []interface{}: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case map[string]interface{}: + var mapKeys []string + for key, _ := range vTyped { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + var mapBuf bytes.Buffer + mapBuf.WriteString("{") + for _, key := range mapKeys { + mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) + } + mapBuf.WriteString("}") + + buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) + } + } + } + + return buf.String() +} + +// ResourceStateKey is a structured representation of the key used for the +// ModuleState.Resources mapping +type ResourceStateKey struct { + Name string + Type string + Mode config.ResourceMode + Index int +} + +// Equal determines whether two ResourceStateKeys are the same +func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool { + if rsk == nil || other == nil { + return false + } + if rsk.Mode != other.Mode { + return false + } + if rsk.Type != other.Type { + return false + } + if rsk.Name != other.Name { + return false + } + if rsk.Index != other.Index { + return false + } + return true +} + +func (rsk *ResourceStateKey) String() string { + if rsk == nil { + return "" + } + var prefix string + switch rsk.Mode { + case config.ManagedResourceMode: + prefix = "" + case config.DataResourceMode: + prefix = "data." + default: + panic(fmt.Errorf("unknown resource mode %s", rsk.Mode)) + } + if rsk.Index == -1 { + return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name) + } + return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index) +} + +// ParseResourceStateKey accepts a key in the format used by +// ModuleState.Resources and returns a resource name and resource index. In the +// state, a resource has the format "type.name.index" or "type.name". In the +// latter case, the index is returned as -1. +func ParseResourceStateKey(k string) (*ResourceStateKey, error) { + parts := strings.Split(k, ".") + mode := config.ManagedResourceMode + if len(parts) > 0 && parts[0] == "data" { + mode = config.DataResourceMode + // Don't need the constant "data" prefix for parsing + // now that we've figured out the mode. + parts = parts[1:] + } + if len(parts) < 2 || len(parts) > 3 { + return nil, fmt.Errorf("Malformed resource state key: %s", k) + } + rsk := &ResourceStateKey{ + Mode: mode, + Type: parts[0], + Name: parts[1], + Index: -1, + } + if len(parts) == 3 { + index, err := strconv.Atoi(parts[2]) + if err != nil { + return nil, fmt.Errorf("Malformed resource state key index: %s", k) + } + rsk.Index = index + } + return rsk, nil +} + +// ResourceState holds the state of a resource that is used so that +// a provider can find and manage an existing resource as well as for +// storing attributes that are used to populate variables of child +// resources. +// +// Attributes has attributes about the created resource that are +// queryable in interpolation: "${type.id.attr}" +// +// Extra is just extra data that a provider can return that we store +// for later, but is not exposed in any way to the user. 
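ParseResourceStateKey and ResourceStateKey.String round-trip with each other. A short sketch of both directions, using the key formats documented above:

func exampleStateKeys() {
	// A leading "data." selects DataResourceMode; a trailing integer
	// becomes the index.
	rsk, err := ParseResourceStateKey("data.aws_ami.ubuntu.2")
	if err != nil {
		panic(err) // malformed keys (too few or too many parts) are rejected
	}
	fmt.Println(rsk.Type, rsk.Name, rsk.Index) // aws_ami ubuntu 2

	// Without an index the key parses with Index == -1, and String()
	// renders it back without a numeric suffix.
	plain, _ := ParseResourceStateKey("aws_instance.web")
	fmt.Println(plain.String()) // aws_instance.web
}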
+// +type ResourceState struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. + Primary *InstanceState `json:"primary"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. + // + // If there were problems creating the replacement Primary, the Deposed + // instance and the (now tainted) replacement Primary will be swapped so the + // tainted replacement will be cleaned up instead. + // + // An instance will remain in the Deposed list until it is successfully + // destroyed and purged. + Deposed []*InstanceState `json:"deposed"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider"` + + mu sync.Mutex +} + +func (s *ResourceState) Lock() { s.mu.Lock() } +func (s *ResourceState) Unlock() { s.mu.Unlock() } + +// Equal tests whether two ResourceStates are equal. +func (s *ResourceState) Equal(other *ResourceState) bool { + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Provider != other.Provider { + return false + } + + // Dependencies must be equal + sort.Strings(s.Dependencies) + sort.Strings(other.Dependencies) + if len(s.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range s.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // States must be equal + if !s.Primary.Equal(other.Primary) { + return false + } + + return true +} + +// Taint marks a resource as tainted. +func (s *ResourceState) Taint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = true + } +} + +// Untaint unmarks a resource as tainted. 
+func (s *ResourceState) Untaint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = false + } +} + +func (s *ResourceState) init() { + s.Lock() + defer s.Unlock() + + if s.Primary == nil { + s.Primary = &InstanceState{} + } + s.Primary.init() + + if s.Dependencies == nil { + s.Dependencies = []string{} + } + + if s.Deposed == nil { + s.Deposed = make([]*InstanceState, 0) + } +} + +func (s *ResourceState) deepcopy() *ResourceState { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*ResourceState) +} + +// prune is used to remove any instances that are no longer required +func (s *ResourceState) prune() { + s.Lock() + defer s.Unlock() + + n := len(s.Deposed) + for i := 0; i < n; i++ { + inst := s.Deposed[i] + if inst == nil || inst.ID == "" { + copy(s.Deposed[i:], s.Deposed[i+1:]) + s.Deposed[n-1] = nil + n-- + i-- + } + } + s.Deposed = s.Deposed[:n] + + s.Dependencies = uniqueStrings(s.Dependencies) +} + +func (s *ResourceState) sort() { + s.Lock() + defer s.Unlock() + + sort.Strings(s.Dependencies) +} + +func (s *ResourceState) String() string { + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) + return buf.String() +} + +// InstanceState is used to track the unique state information belonging +// to a given instance. +type InstanceState struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes"` + + // Ephemeral is used to store any state associated with this instance + // that is necessary for the Terraform run to complete, but is not + // persisted to a state file. + Ephemeral EphemeralState `json:"-"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` + + // Tainted is used to mark a resource for recreation. 
+ Tainted bool `json:"tainted"` + + mu sync.Mutex +} + +func (s *InstanceState) Lock() { s.mu.Lock() } +func (s *InstanceState) Unlock() { s.mu.Unlock() } + +func (s *InstanceState) init() { + s.Lock() + defer s.Unlock() + + if s.Attributes == nil { + s.Attributes = make(map[string]string) + } + if s.Meta == nil { + s.Meta = make(map[string]interface{}) + } + s.Ephemeral.init() +} + +// Copy all the Fields from another InstanceState +func (s *InstanceState) Set(from *InstanceState) { + s.Lock() + defer s.Unlock() + + from.Lock() + defer from.Unlock() + + s.ID = from.ID + s.Attributes = from.Attributes + s.Ephemeral = from.Ephemeral + s.Meta = from.Meta + s.Tainted = from.Tainted +} + +func (s *InstanceState) DeepCopy() *InstanceState { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*InstanceState) +} + +func (s *InstanceState) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return s.ID == "" +} + +func (s *InstanceState) Equal(other *InstanceState) bool { + // Short circuit some nil checks + if s == nil || other == nil { + return s == other + } + s.Lock() + defer s.Unlock() + + // IDs must be equal + if s.ID != other.ID { + return false + } + + // Attributes must be equal + if len(s.Attributes) != len(other.Attributes) { + return false + } + for k, v := range s.Attributes { + otherV, ok := other.Attributes[k] + if !ok { + return false + } + + if v != otherV { + return false + } + } + + // Meta must be equal + if len(s.Meta) != len(other.Meta) { + return false + } + if s.Meta != nil && other.Meta != nil { + // We only do the deep check if both are non-nil. If one is nil + // we treat it as equal since their lengths are both zero (check + // above). + if !reflect.DeepEqual(s.Meta, other.Meta) { + return false + } + } + + if s.Tainted != other.Tainted { + return false + } + + return true +} + +// MergeDiff takes a ResourceDiff and merges the attributes into +// this resource state in order to generate a new state. This new +// state can be used to provide updated attribute lookups for +// variable interpolation. +// +// If the diff attribute requires computing the value, and hence +// won't be available until apply, the value is replaced with the +// computeID. 
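A sketch of MergeDiff in action. This assumes the InstanceDiff and ResourceAttrDiff types from this package's diff code, which are not shown in this excerpt; the attribute names and values are invented:

func exampleMergeDiff(s *InstanceState) *InstanceState {
	d := &InstanceDiff{
		Attributes: map[string]*ResourceAttrDiff{
			// A plain change: the merged state carries the new value.
			"ami": {Old: "ami-1", New: "ami-2"},
			// A computed attribute is replaced by the unknown-value
			// sentinel until apply time, as documented above.
			"public_ip": {NewComputed: true},
		},
	}

	// s itself is not modified; MergeDiff returns a fresh InstanceState.
	return s.MergeDiff(d)
}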
+func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { + result := s.DeepCopy() + if result == nil { + result = new(InstanceState) + } + result.init() + + if s != nil { + s.Lock() + defer s.Unlock() + for k, v := range s.Attributes { + result.Attributes[k] = v + } + } + if d != nil { + for k, diff := range d.CopyAttributes() { + if diff.NewRemoved { + delete(result.Attributes, k) + continue + } + if diff.NewComputed { + result.Attributes[k] = config.UnknownVariableValue + continue + } + + result.Attributes[k] = diff.New + } + } + + return result +} + +func (s *InstanceState) String() string { + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + + if s == nil || s.ID == "" { + return "<not created>" + } + + buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) + + attributes := s.Attributes + attrKeys := make([]string, 0, len(attributes)) + for ak, _ := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av)) + } + + buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted)) + + return buf.String() +} + +// EphemeralState is used for transient state that is only kept in-memory +type EphemeralState struct { + // ConnInfo is used for the providers to export information which is + // used to connect to the resource for provisioning. For example, + // this could contain SSH or WinRM credentials. + ConnInfo map[string]string `json:"-"` + + // Type is used to specify the resource type for this instance. This is only + // required for import operations (as documented). If the documentation + // doesn't state that you need to set this, then don't worry about + // setting it. + Type string `json:"-"` +} + +func (e *EphemeralState) init() { + if e.ConnInfo == nil { + e.ConnInfo = make(map[string]string) + } +} + +func (e *EphemeralState) DeepCopy() *EphemeralState { + copy, err := copystructure.Config{Lock: true}.Copy(e) + if err != nil { + panic(err) + } + + return copy.(*EphemeralState) +} + +type jsonStateVersionIdentifier struct { + Version int `json:"version"` +} + +// Check if this is a V0 format - the magic bytes at the start of the file +// should be "tfstate" if so. We no longer support upgrading this type of +// state but return an error message explaining to a user how they can +// upgrade via the 0.6.x series. +func testForV0State(buf *bufio.Reader) error { + start, err := buf.Peek(len("tfstate")) + if err != nil { + return fmt.Errorf("Failed to check for magic bytes: %v", err) + } + if string(start) == "tfstate" { + return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" + + "format which was used prior to Terraform 0.3. Please upgrade\n" + + "this state file using Terraform 0.6.16 prior to using it with\n" + + "Terraform 0.7.") + } + + return nil +} + +// ErrNoState is returned by ReadState when the io.Reader contains no data +var ErrNoState = errors.New("no state") + +// ReadState reads a state structure out of a reader in the format that +// was written by WriteState. +func ReadState(src io.Reader) (*State, error) { + buf := bufio.NewReader(src) + if _, err := buf.Peek(1); err != nil { + // the error is either io.EOF or "invalid argument", and both are from + // an empty state. + return nil, ErrNoState + } + + if err := testForV0State(buf); err != nil { + return nil, err + } + + // If we are JSON we buffer the whole thing in memory so we can read it twice.
+ // This is suboptimal, but will work for now. + jsonBytes, err := ioutil.ReadAll(buf) + if err != nil { + return nil, fmt.Errorf("Reading state file failed: %v", err) + } + + versionIdentifier := &jsonStateVersionIdentifier{} + if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil { + return nil, fmt.Errorf("Decoding state file version failed: %v", err) + } + + var result *State + switch versionIdentifier.Version { + case 0: + return nil, fmt.Errorf("State version 0 is not supported as JSON.") + case 1: + v1State, err := ReadStateV1(jsonBytes) + if err != nil { + return nil, err + } + + v2State, err := upgradeStateV1ToV2(v1State) + if err != nil { + return nil, err + } + + v3State, err := upgradeStateV2ToV3(v2State) + if err != nil { + return nil, err + } + + // increment the Serial whenever we upgrade state + v3State.Serial++ + result = v3State + case 2: + v2State, err := ReadStateV2(jsonBytes) + if err != nil { + return nil, err + } + v3State, err := upgradeStateV2ToV3(v2State) + if err != nil { + return nil, err + } + + v3State.Serial++ + result = v3State + case 3: + v3State, err := ReadStateV3(jsonBytes) + if err != nil { + return nil, err + } + + result = v3State + default: + return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", + SemVersion.String(), versionIdentifier.Version) + } + + // If we reached this place we must have a result set + if result == nil { + panic("resulting state in load not set, assertion failed") + } + + // Prune the state when we read it. It's possible to write unpruned states or + // for a user to make a state unpruned (nil-ing a module state for example). + result.prune() + + // Validate the state file is valid + if err := result.Validate(); err != nil { + return nil, err + } + + return result, nil +} + +func ReadStateV1(jsonBytes []byte) (*stateV1, error) { + v1State := &stateV1{} + if err := json.Unmarshal(jsonBytes, v1State); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %v", err) + } + + if v1State.Version != 1 { + return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+ + "read %d, expected 1", v1State.Version) + } + + return v1State, nil +} + +func ReadStateV2(jsonBytes []byte) (*State, error) { + state := &State{} + if err := json.Unmarshal(jsonBytes, state); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %v", err) + } + + // Check the version; this is to ensure we don't read a future + // version that we don't understand + if state.Version > StateVersion { + return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", + SemVersion.String(), state.Version) + } + + // Make sure the version is semantic + if state.TFVersion != "" { + if _, err := version.NewVersion(state.TFVersion); err != nil { + return nil, fmt.Errorf( + "State contains invalid version: %s\n\n"+ + "Terraform validates the version format prior to writing it. This\n"+ + "likely means that the state has become corrupted through\n"+ + "some external means. 
Please manually modify the Terraform version\n"+ + "field to be a proper semantic version.", + state.TFVersion) + } + } + + // catch any uninitialized fields in the state + state.init() + + // Sort it + state.sort() + + return state, nil +} + +func ReadStateV3(jsonBytes []byte) (*State, error) { + state := &State{} + if err := json.Unmarshal(jsonBytes, state); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %v", err) + } + + // Check the version; this is to ensure we don't read a future + // version that we don't understand + if state.Version > StateVersion { + return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", + SemVersion.String(), state.Version) + } + + // Make sure the version is semantic + if state.TFVersion != "" { + if _, err := version.NewVersion(state.TFVersion); err != nil { + return nil, fmt.Errorf( + "State contains invalid version: %s\n\n"+ + "Terraform validates the version format prior to writing it. This\n"+ + "likely means that the state has become corrupted through\n"+ + "some external means. Please manually modify the Terraform version\n"+ + "field to be a proper semantic version.", + state.TFVersion) + } + } + + // catch any uninitialized fields in the state + state.init() + + // Sort it + state.sort() + + // Now we write the state back out to detect any changes in normalization. + // If our state is now written out differently, bump the serial number to + // prevent conflicts. + var buf bytes.Buffer + err := WriteState(state, &buf) + if err != nil { + return nil, err + } + + if !bytes.Equal(jsonBytes, buf.Bytes()) { + log.Println("[INFO] state modified during read or write. incrementing serial number") + state.Serial++ + } + + return state, nil +} + +// WriteState writes a state out to the given writer in the JSON format +// read back by ReadState. +func WriteState(d *State, dst io.Writer) error { + // writing a nil state is a noop. + if d == nil { + return nil + } + + // make sure we have no uninitialized fields + d.init() + + // Make sure it is sorted + d.sort() + + // Ensure the version is set + d.Version = StateVersion + + // If the TFVersion is set, verify it. We used to just set the version + // here, but this isn't safe since it changes the MD5 sum on some remote + // state storage backends such as Atlas. We now leave it be if needed. + if d.TFVersion != "" { + if _, err := version.NewVersion(d.TFVersion); err != nil { + return fmt.Errorf( + "Error writing state, invalid version: %s\n\n"+ + "The Terraform version when writing the state must be a semantic\n"+ + "version.", + d.TFVersion) + } + } + + // Encode the data in a human-friendly way + data, err := json.MarshalIndent(d, "", " ") + if err != nil { + return fmt.Errorf("Failed to encode state: %s", err) + } + + // We append a newline to the data because MarshalIndent doesn't + data = append(data, '\n') + + // Write the data out to the dst + if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil { + return fmt.Errorf("Failed to write state: %v", err) + } + + return nil +} + +// resourceNameSort implements sort.Interface to sort name parts lexically for +// strings and numerically for integer indexes.
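WriteState and ReadState are symmetric, so a round trip through an in-memory buffer is a convenient way to normalize a state; a sketch (the function name is illustrative):

func exampleRoundTrip(s *State) (*State, error) {
	var buf bytes.Buffer

	// WriteState initializes, sorts, and version-stamps the state before
	// encoding it as indented JSON.
	if err := WriteState(s, &buf); err != nil {
		return nil, err
	}

	// ReadState sniffs the version, upgrades older formats as needed,
	// prunes, and validates before returning.
	return ReadState(&buf)
}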
+type resourceNameSort []string + +func (r resourceNameSort) Len() int { return len(r) } +func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } + +func (r resourceNameSort) Less(i, j int) bool { + iParts := strings.Split(r[i], ".") + jParts := strings.Split(r[j], ".") + + end := len(iParts) + if len(jParts) < end { + end = len(jParts) + } + + for idx := 0; idx < end; idx++ { + if iParts[idx] == jParts[idx] { + continue + } + + // sort on the first non-matching part + iInt, iIntErr := strconv.Atoi(iParts[idx]) + jInt, jIntErr := strconv.Atoi(jParts[idx]) + + switch { + case iIntErr == nil && jIntErr == nil: + // sort numerically if both parts are integers + return iInt < jInt + case iIntErr == nil: + // numbers sort before strings + return true + case jIntErr == nil: + return false + default: + return iParts[idx] < jParts[idx] + } + } + + return r[i] < r[j] +} + +// moduleStateSort implements sort.Interface to sort module states +type moduleStateSort []*ModuleState + +func (s moduleStateSort) Len() int { + return len(s) +} + +func (s moduleStateSort) Less(i, j int) bool { + a := s[i] + b := s[j] + + // If either is nil, then the nil one is "less" than + if a == nil || b == nil { + return a == nil + } + + // If the lengths are different, then the shorter one always wins + if len(a.Path) != len(b.Path) { + return len(a.Path) < len(b.Path) + } + + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") +} + +func (s moduleStateSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +const stateValidateErrMultiModule = ` +Multiple modules with the same path: %s + +This means that there are multiple entries in the "modules" field +in your state file that point to the same module. This will cause Terraform +to behave in unexpected and error prone ways and is invalid. Please back up +and modify your state file manually to resolve this. +` diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_add.go b/vendor/github.com/hashicorp/terraform/terraform/state_add.go new file mode 100644 index 00000000..11637303 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/state_add.go @@ -0,0 +1,374 @@ +package terraform + +import "fmt" + +// Add adds the item in the state at the given address. +// +// The item can be a ModuleState, ResourceState, or InstanceState. Depending +// on the item type, the address may or may not be valid. For example, a +// module cannot be moved to a resource address, however a resource can be +// moved to a module address (it retains the same name, under that resource). +// +// The item can also be a []*ModuleState, which is the case for nested +// modules. In this case, Add will expect the zero-index to be the top-most +// module to add and will only nest children from there. For semantics, this +// is equivalent to module => module. 
+// +// The full semantics of Add: +// +// ┌───────────────────┬───────────────────┬───────────────────┐ +// │ Module Address │ Resource Address │ Instance Address │ +// ┌─────────────────┼───────────────────┼───────────────────┼───────────────────┤ +// │ ModuleState │ ✓ │ x │ x │ +// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤ +// │ ResourceState │ ✓ │ ✓ │ maybe* │ +// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤ +// │ Instance State │ ✓ │ ✓ │ ✓ │ +// └─────────────────┴───────────────────┴───────────────────┴───────────────────┘ +// +// *maybe - Resources can be added at an instance address only if the resource +// represents a single instance (primary). Example: +// "aws_instance.foo" can be moved to "aws_instance.bar.tainted" +// +func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error { + // Parse the address + + toAddr, err := ParseResourceAddress(toAddrRaw) + if err != nil { + return err + } + + // Parse the from address + fromAddr, err := ParseResourceAddress(fromAddrRaw) + if err != nil { + return err + } + + // Determine the types + from := detectValueAddLoc(raw) + to := detectAddrAddLoc(toAddr) + + // Find the function to do this + fromMap, ok := stateAddFuncs[from] + if !ok { + return fmt.Errorf("invalid source to add to state: %T", raw) + } + f, ok := fromMap[to] + if !ok { + return fmt.Errorf("invalid destination: %s (%d)", toAddr, to) + } + + // Call the migrator + if err := f(s, fromAddr, toAddr, raw); err != nil { + return err + } + + // Prune the state + s.prune() + return nil +} + +func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error { + // raw can be either *ModuleState or []*ModuleState. The former means + // we're moving just one module. The latter means we're moving a module + // and children. + root := raw + var rest []*ModuleState + if list, ok := raw.([]*ModuleState); ok { + // We need at least one item + if len(list) == 0 { + return fmt.Errorf("module move with no value to: %s", addr) + } + + // The first item is always the root + root = list[0] + if len(list) > 1 { + rest = list[1:] + } + } + + // Get the actual module state + src := root.(*ModuleState).deepcopy() + + // If the target module exists, it is an error + path := append([]string{"root"}, addr.Path...) + if s.ModuleByPath(path) != nil { + return fmt.Errorf("module target is not empty: %s", addr) + } + + // Create it and copy our outputs and dependencies + mod := s.AddModule(path) + mod.Outputs = src.Outputs + mod.Dependencies = src.Dependencies + + // Go through the resources perform an add for each of those + for k, v := range src.Resources { + resourceKey, err := ParseResourceStateKey(k) + if err != nil { + return err + } + + // Update the resource address for this + addrCopy := *addr + addrCopy.Type = resourceKey.Type + addrCopy.Name = resourceKey.Name + addrCopy.Index = resourceKey.Index + addrCopy.Mode = resourceKey.Mode + + // Perform an add + if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil { + return err + } + } + + // Add all the children if we have them + for _, item := range rest { + // If item isn't a descendent of our root, then ignore it + if !src.IsDescendent(item) { + continue + } + + // It is! Strip the leading prefix and attach that to our address + extra := item.Path[len(src.Path):] + addrCopy := addr.Copy() + addrCopy.Path = append(addrCopy.Path, extra...) 
+
+		// Add it
+		s.Add(fromAddr.String(), addrCopy.String(), item)
+	}
+
+	return nil
+}
+
+func stateAddFunc_Resource_Module(
+	s *State, from, to *ResourceAddress, raw interface{}) error {
+	// Build the more specific 'to' address
+	addr := *to
+	addr.Type = from.Type
+	addr.Name = from.Name
+
+	return s.Add(from.String(), addr.String(), raw)
+}
+
+func stateAddFunc_Resource_Resource(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
+	// raw can be either *ResourceState or []*ResourceState. The former means
+	// we're moving just one resource. The latter means we're moving a count
+	// of resources.
+	if list, ok := raw.([]*ResourceState); ok {
+		// We need at least one item
+		if len(list) == 0 {
+			return fmt.Errorf("resource move with no value to: %s", addr)
+		}
+
+		// If there is an index, this is an error since we can't assign
+		// a set of resources to a single index
+		if addr.Index >= 0 && len(list) > 1 {
+			return fmt.Errorf(
+				"multiple resources can't be moved to a single index: "+
+					"%s => %s", fromAddr, addr)
+		}
+
+		// Add each with a specific index
+		for i, rs := range list {
+			addrCopy := addr.Copy()
+			addrCopy.Index = i
+
+			if err := s.Add(fromAddr.String(), addrCopy.String(), rs); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	}
+
+	src := raw.(*ResourceState).deepcopy()
+
+	// Initialize the resource
+	resourceRaw, exists := stateAddInitAddr(s, addr)
+	if exists {
+		return fmt.Errorf("resource exists and is not empty: %s", addr)
+	}
+	resource := resourceRaw.(*ResourceState)
+	resource.Type = src.Type
+	resource.Dependencies = src.Dependencies
+	resource.Provider = src.Provider
+
+	// Move the primary
+	if src.Primary != nil {
+		addrCopy := *addr
+		addrCopy.InstanceType = TypePrimary
+		addrCopy.InstanceTypeSet = true
+		if err := s.Add(fromAddr.String(), addrCopy.String(), src.Primary); err != nil {
+			return err
+		}
+	}
+
+	// Move all deposed instances
+	if len(src.Deposed) > 0 {
+		resource.Deposed = src.Deposed
+	}
+
+	return nil
+}
+
+func stateAddFunc_Instance_Instance(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
+	src := raw.(*InstanceState).DeepCopy()
+
+	// Create the instance
+	instanceRaw, _ := stateAddInitAddr(s, addr)
+	instance := instanceRaw.(*InstanceState)
+
+	// Set it
+	instance.Set(src)
+
+	return nil
+}
+
+func stateAddFunc_Instance_Module(
+	s *State, from, to *ResourceAddress, raw interface{}) error {
+	addr := *to
+	addr.Type = from.Type
+	addr.Name = from.Name
+
+	return s.Add(from.String(), addr.String(), raw)
+}
+
+func stateAddFunc_Instance_Resource(
+	s *State, from, to *ResourceAddress, raw interface{}) error {
+	addr := *to
+	addr.InstanceType = TypePrimary
+	addr.InstanceTypeSet = true
+
+	return s.Add(from.String(), addr.String(), raw)
+}
+
+// stateAddFunc is the type of function for adding an item to a state
+type stateAddFunc func(s *State, from, to *ResourceAddress, item interface{}) error
+
+// stateAddFuncs has the full matrix mapping of the state adders.
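+//
+// A minimal sketch of the dispatch it enables (editor's note; the values
+// in the comments are illustrative):
+//
+//	from := detectValueAddLoc(raw)    // e.g. stateAddResource
+//	to := detectAddrAddLoc(toAddr)    // e.g. stateAddModule
+//	f := stateAddFuncs[from][to]      // stateAddFunc_Resource_Module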
+var stateAddFuncs map[stateAddLoc]map[stateAddLoc]stateAddFunc + +func init() { + stateAddFuncs = map[stateAddLoc]map[stateAddLoc]stateAddFunc{ + stateAddModule: { + stateAddModule: stateAddFunc_Module_Module, + }, + stateAddResource: { + stateAddModule: stateAddFunc_Resource_Module, + stateAddResource: stateAddFunc_Resource_Resource, + }, + stateAddInstance: { + stateAddInstance: stateAddFunc_Instance_Instance, + stateAddModule: stateAddFunc_Instance_Module, + stateAddResource: stateAddFunc_Instance_Resource, + }, + } +} + +// stateAddLoc is an enum to represent the location where state is being +// moved from/to. We use this for quick lookups in a function map. +type stateAddLoc uint + +const ( + stateAddInvalid stateAddLoc = iota + stateAddModule + stateAddResource + stateAddInstance +) + +// detectAddrAddLoc detects the state type for the given address. This +// function is specifically not unit tested since we consider the State.Add +// functionality to be comprehensive enough to cover this. +func detectAddrAddLoc(addr *ResourceAddress) stateAddLoc { + if addr.Name == "" { + return stateAddModule + } + + if !addr.InstanceTypeSet { + return stateAddResource + } + + return stateAddInstance +} + +// detectValueAddLoc determines the stateAddLoc value from the raw value +// that is some State structure. +func detectValueAddLoc(raw interface{}) stateAddLoc { + switch raw.(type) { + case *ModuleState: + return stateAddModule + case []*ModuleState: + return stateAddModule + case *ResourceState: + return stateAddResource + case []*ResourceState: + return stateAddResource + case *InstanceState: + return stateAddInstance + default: + return stateAddInvalid + } +} + +// stateAddInitAddr takes a ResourceAddress and creates the non-existing +// resources up to that point, returning the empty (or existing) interface +// at that address. +func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) { + addType := detectAddrAddLoc(addr) + + // Get the module + path := append([]string{"root"}, addr.Path...) + exists := true + mod := s.ModuleByPath(path) + if mod == nil { + mod = s.AddModule(path) + exists = false + } + if addType == stateAddModule { + return mod, exists + } + + // Add the resource + resourceKey := (&ResourceStateKey{ + Name: addr.Name, + Type: addr.Type, + Index: addr.Index, + Mode: addr.Mode, + }).String() + exists = true + resource, ok := mod.Resources[resourceKey] + if !ok { + resource = &ResourceState{Type: addr.Type} + resource.init() + mod.Resources[resourceKey] = resource + exists = false + } + if addType == stateAddResource { + return resource, exists + } + + // Get the instance + exists = true + instance := &InstanceState{} + switch addr.InstanceType { + case TypePrimary, TypeTainted: + if v := resource.Primary; v != nil { + instance = resource.Primary + } else { + exists = false + } + case TypeDeposed: + idx := addr.Index + if addr.Index < 0 { + idx = 0 + } + if len(resource.Deposed) > idx { + instance = resource.Deposed[idx] + } else { + resource.Deposed = append(resource.Deposed, instance) + exists = false + } + } + + return instance, exists +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go new file mode 100644 index 00000000..2dcb11b7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go @@ -0,0 +1,267 @@ +package terraform + +import ( + "fmt" + "sort" +) + +// StateFilter is responsible for filtering and searching a state. 
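+//
+// A minimal usage sketch (editor's note, not part of the vendored source;
+// the addresses are hypothetical):
+//
+//	filter := &StateFilter{State: state}
+//	results, err := filter.Filter("module.web", "aws_instance.foo")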
+//
+// This is a separate struct from State rather than a method on State
+// because StateFilter might create sidecar data structures to optimize
+// filtering on the state.
+//
+// If you change the State, the filter created is invalid and either
+// Reset should be called or a new one should be allocated. StateFilter
+// will not watch State for changes and do this for you. If you filter after
+// changing the State without calling Reset, the behavior is not defined.
+type StateFilter struct {
+	State *State
+}
+
+// Filter takes the addresses specified by fs and finds all the matches.
+// The values of fs are in the resource addressing syntax that can be parsed
+// by ParseResourceAddress.
+func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) {
+	// Parse all the addresses
+	as := make([]*ResourceAddress, len(fs))
+	for i, v := range fs {
+		a, err := ParseResourceAddress(v)
+		if err != nil {
+			return nil, fmt.Errorf("Error parsing address '%s': %s", v, err)
+		}
+
+		as[i] = a
+	}
+
+	// If we weren't given any filters, then we list all
+	if len(fs) == 0 {
+		as = append(as, &ResourceAddress{Index: -1})
+	}
+
+	// Filter each of the addresses. We keep track of this in a map to
+	// strip duplicates.
+	resultSet := make(map[string]*StateFilterResult)
+	for _, a := range as {
+		for _, r := range f.filterSingle(a) {
+			resultSet[r.String()] = r
+		}
+	}
+
+	// Make the result list
+	results := make([]*StateFilterResult, 0, len(resultSet))
+	for _, v := range resultSet {
+		results = append(results, v)
+	}
+
+	// Sort them and return
+	sort.Sort(StateFilterResultSlice(results))
+	return results, nil
+}
+
+func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult {
+	// The slice to keep track of results
+	var results []*StateFilterResult
+
+	// Go through modules first.
+	modules := make([]*ModuleState, 0, len(f.State.Modules))
+	for _, m := range f.State.Modules {
+		if f.relevant(a, m) {
+			modules = append(modules, m)
+
+			// Only add the module to the results if we haven't specified a type.
+			// We also ignore the root module.
+			if a.Type == "" && len(m.Path) > 1 {
+				results = append(results, &StateFilterResult{
+					Path:    m.Path[1:],
+					Address: (&ResourceAddress{Path: m.Path[1:]}).String(),
+					Value:   m,
+				})
+			}
+		}
+	}
+
+	// With the modules set, go through all the resources within
+	// the modules to find relevant resources.
+	for _, m := range modules {
+		for n, r := range m.Resources {
+			// The name in the state contains valuable information. Parse.
+			key, err := ParseResourceStateKey(n)
+			if err != nil {
+				// If the key fails to parse, skip this resource rather
+				// than failing the whole filter.
+				continue
+			}
+
+			// Older states and test fixtures often don't contain the
+			// type directly on the ResourceState. We add this so StateFilter
+			// is a bit more robust.
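+			// (Editor's note: e.g. the state key "aws_instance.foo"
+			// parses to a Type of "aws_instance", backfilled below.)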
+			if r.Type == "" {
+				r.Type = key.Type
+			}
+
+			if f.relevant(a, r) {
+				if a.Name != "" && a.Name != key.Name {
+					// Name doesn't match
+					continue
+				}
+
+				if a.Index >= 0 && key.Index != a.Index {
+					// Index doesn't match
+					continue
+				}
+
+				// Build the address for this resource
+				addr := &ResourceAddress{
+					Path:  m.Path[1:],
+					Name:  key.Name,
+					Type:  key.Type,
+					Index: key.Index,
+				}
+
+				// Add the resource level result
+				resourceResult := &StateFilterResult{
+					Path:    addr.Path,
+					Address: addr.String(),
+					Value:   r,
+				}
+				if !a.InstanceTypeSet {
+					results = append(results, resourceResult)
+				}
+
+				// Add the instances
+				if r.Primary != nil {
+					addr.InstanceType = TypePrimary
+					addr.InstanceTypeSet = false
+					results = append(results, &StateFilterResult{
+						Path:    addr.Path,
+						Address: addr.String(),
+						Parent:  resourceResult,
+						Value:   r.Primary,
+					})
+				}
+
+				for _, instance := range r.Deposed {
+					if f.relevant(a, instance) {
+						addr.InstanceType = TypeDeposed
+						addr.InstanceTypeSet = true
+						results = append(results, &StateFilterResult{
+							Path:    addr.Path,
+							Address: addr.String(),
+							Parent:  resourceResult,
+							Value:   instance,
+						})
+					}
+				}
+			}
+		}
+	}
+
+	return results
+}
+
+// relevant checks for relevance of this address against the given value.
+func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool {
+	switch v := raw.(type) {
+	case *ModuleState:
+		path := v.Path[1:]
+
+		if len(addr.Path) > len(path) {
+			// Longer path in address means there is no way we match.
+			return false
+		}
+
+		// Check for a prefix match
+		for i, p := range addr.Path {
+			if path[i] != p {
+				// Any mismatches don't match.
+				return false
+			}
+		}
+
+		return true
+	case *ResourceState:
+		if addr.Type == "" {
+			// If we have no resource type, then we're interested in all!
+			return true
+		}
+
+		// If the type doesn't match we fail immediately
+		if v.Type != addr.Type {
+			return false
+		}
+
+		return true
+	default:
+		// If we don't know about it, let's just say no
+		return false
+	}
+}
+
+// StateFilterResult is a single result from a filter operation. Filter
+// can match multiple things within a state (module, resource, instance, etc.)
+// and this unifies that.
+type StateFilterResult struct {
+	// Module path of the result
+	Path []string
+
+	// Address is the address that can be used to reference this exact result.
+	Address string
+
+	// Parent, if non-nil, is a parent of this result. For instances, the
+	// parent would be a resource. For resources, the parent would be
+	// a module. For modules, this is currently nil.
+	Parent *StateFilterResult
+
+	// Value is the actual value. This must be type switched on. It can be
+	// any data structure that `State` can hold: `ModuleState`,
+	// `ResourceState`, `InstanceState`.
+	Value interface{}
+}
+
+func (r *StateFilterResult) String() string {
+	return fmt.Sprintf("%T: %s", r.Value, r.Address)
+}
+
+func (r *StateFilterResult) sortedType() int {
+	switch r.Value.(type) {
+	case *ModuleState:
+		return 0
+	case *ResourceState:
+		return 1
+	case *InstanceState:
+		return 2
+	default:
+		return 50
+	}
+}
+
+// StateFilterResultSlice is a slice of results that implements
+// sort.Interface. The sorting goal is what is most appealing to
+// human output.
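+//
+// For example (editor's note): results with distinct addresses sort
+// lexicographically, except that "foo.9" sorts before "foo.10" by index;
+// results sharing one address order module, then resource, then instance.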
+type StateFilterResultSlice []*StateFilterResult
+
+func (s StateFilterResultSlice) Len() int      { return len(s) }
+func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s StateFilterResultSlice) Less(i, j int) bool {
+	a, b := s[i], s[j]
+
+	// if these addresses contain an index, we want to sort by index rather than name
+	addrA, errA := ParseResourceAddress(a.Address)
+	addrB, errB := ParseResourceAddress(b.Address)
+	if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index {
+		return addrA.Index < addrB.Index
+	}
+
+	// If the addresses are different it is just lexicographic sorting
+	if a.Address != b.Address {
+		return a.Address < b.Address
+	}
+
+	// Addresses are the same, so the result type breaks the tie
+	return a.sortedType() < b.sortedType()
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
new file mode 100644
index 00000000..aa13cce8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
@@ -0,0 +1,189 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/mitchellh/copystructure"
+)
+
+// upgradeStateV1ToV2 is used to upgrade a V1 state representation
+// into a V2 state representation
+func upgradeStateV1ToV2(old *stateV1) (*State, error) {
+	if old == nil {
+		return nil, nil
+	}
+
+	remote, err := old.Remote.upgradeToV2()
+	if err != nil {
+		return nil, fmt.Errorf("Error upgrading State V1: %v", err)
+	}
+
+	modules := make([]*ModuleState, len(old.Modules))
+	for i, module := range old.Modules {
+		upgraded, err := module.upgradeToV2()
+		if err != nil {
+			return nil, fmt.Errorf("Error upgrading State V1: %v", err)
+		}
+		modules[i] = upgraded
+	}
+	if len(modules) == 0 {
+		modules = nil
+	}
+
+	newState := &State{
+		Version: 2,
+		Serial:  old.Serial,
+		Remote:  remote,
+		Modules: modules,
+	}
+
+	newState.sort()
+	newState.init()
+
+	return newState, nil
+}
+
+func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) {
+	if old == nil {
+		return nil, nil
+	}
+
+	config, err := copystructure.Copy(old.Config)
+	if err != nil {
+		return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err)
+	}
+
+	return &RemoteState{
+		Type:   old.Type,
+		Config: config.(map[string]string),
+	}, nil
+}
+
+func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) {
+	if old == nil {
+		return nil, nil
+	}
+
+	pathRaw, err := copystructure.Copy(old.Path)
+	if err != nil {
+		return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
+	}
+	path, ok := pathRaw.([]string)
+	if !ok {
+		return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings")
+	}
+	if len(path) == 0 {
+		// We found some V1 states with a nil path. Assume root and catch
+		// duplicate path errors later (as part of Validate).
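+		// (Editor's note: rootModulePath is []string{"root"}.)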
+ path = rootModulePath + } + + // Outputs needs upgrading to use the new structure + outputs := make(map[string]*OutputState) + for key, output := range old.Outputs { + outputs[key] = &OutputState{ + Type: "string", + Value: output, + Sensitive: false, + } + } + + resources := make(map[string]*ResourceState) + for key, oldResource := range old.Resources { + upgraded, err := oldResource.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + resources[key] = upgraded + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + + return &ModuleState{ + Path: path, + Outputs: outputs, + Resources: resources, + Dependencies: dependencies.([]string), + }, nil +} + +func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) { + if old == nil { + return nil, nil + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + primary, err := old.Primary.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + deposed := make([]*InstanceState, len(old.Deposed)) + for i, v := range old.Deposed { + upgraded, err := v.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + deposed[i] = upgraded + } + if len(deposed) == 0 { + deposed = nil + } + + return &ResourceState{ + Type: old.Type, + Dependencies: dependencies.([]string), + Primary: primary, + Deposed: deposed, + Provider: old.Provider, + }, nil +} + +func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) { + if old == nil { + return nil, nil + } + + attributes, err := copystructure.Copy(old.Attributes) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + ephemeral, err := old.Ephemeral.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + meta, err := copystructure.Copy(old.Meta) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + newMeta := make(map[string]interface{}) + for k, v := range meta.(map[string]string) { + newMeta[k] = v + } + + return &InstanceState{ + ID: old.ID, + Attributes: attributes.(map[string]string), + Ephemeral: *ephemeral, + Meta: newMeta, + }, nil +} + +func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) { + connInfo, err := copystructure.Copy(old.ConnInfo) + if err != nil { + return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err) + } + return &EphemeralState{ + ConnInfo: connInfo.(map[string]string), + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go new file mode 100644 index 00000000..e52d35fc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go @@ -0,0 +1,142 @@ +package terraform + +import ( + "fmt" + "log" + "regexp" + "sort" + "strconv" + "strings" +) + +// The upgrade process from V2 to V3 state does not affect the structure, +// so we do not need to redeclare all of the structs involved - we just +// take a deep copy of the old structure and assert the version number is +// as we expect. 
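+//
+// For example (editor's note), the main attribute-level change is that the
+// count key for a map-typed attribute is rewritten from the list syntax to
+// the map syntax:
+//
+//	"tags.#": "2"  =>  "tags.%": "2"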
+func upgradeStateV2ToV3(old *State) (*State, error) { + new := old.DeepCopy() + + // Ensure the copied version is v2 before attempting to upgrade + if new.Version != 2 { + return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " + + "a state which is not version 2.") + } + + // Set the new version number + new.Version = 3 + + // Change the counts for things which look like maps to use the % + // syntax. Remove counts for empty collections - they will be added + // back in later. + for _, module := range new.Modules { + for _, resource := range module.Resources { + // Upgrade Primary + if resource.Primary != nil { + upgradeAttributesV2ToV3(resource.Primary) + } + + // Upgrade Deposed + if resource.Deposed != nil { + for _, deposed := range resource.Deposed { + upgradeAttributesV2ToV3(deposed) + } + } + } + } + + return new, nil +} + +func upgradeAttributesV2ToV3(instanceState *InstanceState) error { + collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) + collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) + + // Identify the key prefix of anything which is a collection + var collectionKeyPrefixes []string + for key := range instanceState.Attributes { + if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) + } + } + sort.Strings(collectionKeyPrefixes) + + log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) + + // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not + // run very often. + for _, prefix := range collectionKeyPrefixes { + // First get the actual keys that belong to this prefix + var potentialKeysMatching []string + for key := range instanceState.Attributes { + if strings.HasPrefix(key, prefix) { + potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) + } + } + sort.Strings(potentialKeysMatching) + + var actualKeysMatching []string + for _, key := range potentialKeysMatching { + if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + actualKeysMatching = append(actualKeysMatching, submatches[0][1]) + } else { + if key != "#" { + actualKeysMatching = append(actualKeysMatching, key) + } + } + } + actualKeysMatching = uniqueSortedStrings(actualKeysMatching) + + // Now inspect the keys in order to determine whether this is most likely to be + // a map, list or set. There is room for error here, so we log in each case. If + // there is no method of telling, we remove the key from the InstanceState in + // order that it will be recreated. Again, this could be rolled into fewer loops + // but we prefer clarity. + + oldCountKey := fmt.Sprintf("%s#", prefix) + + // First, detect "obvious" maps - which have non-numeric keys (mostly). + hasNonNumericKeys := false + for _, key := range actualKeysMatching { + if _, err := strconv.Atoi(key); err != nil { + hasNonNumericKeys = true + } + } + if hasNonNumericKeys { + newCountKey := fmt.Sprintf("%s%%", prefix) + + instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", + strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) + } + + // Now detect empty collections and remove them from state. 
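+		// (Editor's note: an empty collection leaves behind only its
+		// dangling count key, e.g. "tags.#" with no "tags.*" entries.)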
+		if len(actualKeysMatching) == 0 {
+			delete(instanceState.Attributes, oldCountKey)
+			log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
+				strings.TrimSuffix(prefix, "."))
+		}
+	}
+
+	return nil
+}
+
+// uniqueSortedStrings removes duplicates from a slice of strings and returns
+// a sorted slice of the unique strings.
+func uniqueSortedStrings(input []string) []string {
+	uniquemap := make(map[string]struct{})
+	for _, str := range input {
+		uniquemap[str] = struct{}{}
+	}
+
+	output := make([]string, len(uniquemap))
+
+	i := 0
+	for key := range uniquemap {
+		output[i] = key
+		i++
+	}
+
+	sort.Strings(output)
+	return output
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
new file mode 100644
index 00000000..68cffb41
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
@@ -0,0 +1,145 @@
+package terraform
+
+// stateV1 keeps track of a snapshot state-of-the-world that Terraform
+// can use to keep track of what real world resources it is actually
+// managing.
+//
+// stateV1 is used only for the purposes of backwards compatibility
+// and is no longer used in Terraform.
+//
+// For the upgrade process, see state_upgrade_v1_to_v2.go
+type stateV1 struct {
+	// Version is the protocol version. "1" for a StateV1.
+	Version int `json:"version"`
+
+	// Serial is incremented on any operation that modifies
+	// the State file. It is used to detect potentially conflicting
+	// updates.
+	Serial int64 `json:"serial"`
+
+	// Remote is used to track the metadata required to
+	// pull and push state files from a remote storage endpoint.
+	Remote *remoteStateV1 `json:"remote,omitempty"`
+
+	// Modules contains all the modules in a breadth-first order
+	Modules []*moduleStateV1 `json:"modules"`
+}
+
+type remoteStateV1 struct {
+	// Type controls the client we use for the remote state
+	Type string `json:"type"`
+
+	// Config is used to store arbitrary configuration that
+	// is type specific
+	Config map[string]string `json:"config"`
+}
+
+type moduleStateV1 struct {
+	// Path is the import path from the root module. Module imports are
+	// always disjoint, so the path represents a module tree
+	Path []string `json:"path"`
+
+	// Outputs declared by the module and maintained for each module
+	// even though only the root module technically needs to be kept.
+	// This allows operators to inspect values at the boundaries.
+	Outputs map[string]string `json:"outputs"`
+
+	// Resources is a mapping of the logically named resource to
+	// the state of the resource. Each resource may actually have
+	// N instances underneath, although a user only needs to think
+	// about the 1:1 case.
+	Resources map[string]*resourceStateV1 `json:"resources"`
+
+	// Dependencies are a list of things that this module relies on
+	// existing to remain intact. For example: a module may depend
+	// on a VPC ID given by an aws_vpc resource.
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a module that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
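+	//
+	// (Editor's note: serialized under the "depends_on" key, matching
+	// the configuration language's depends_on meta-argument.)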
+	Dependencies []string `json:"depends_on,omitempty"`
+}
+
+type resourceStateV1 struct {
+	// This is filled in and managed by Terraform, and is the resource
+	// type itself such as "mycloud_instance". If a resource provider sets
+	// this value, it won't be persisted.
+	Type string `json:"type"`
+
+	// Dependencies are a list of things that this resource relies on
+	// existing to remain intact. For example: an AWS instance might
+	// depend on a subnet (which itself might depend on a VPC, and so
+	// on).
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a resource that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
+	Dependencies []string `json:"depends_on,omitempty"`
+
+	// Primary is the current active instance for this resource.
+	// It can be replaced but only after a successful creation.
+	// This is the instance on which providers will act.
+	Primary *instanceStateV1 `json:"primary"`
+
+	// Tainted is used to track any underlying instances that
+	// have been created but are in a bad or unknown state and
+	// need to be cleaned up subsequently. In the
+	// standard case, there is at most a single instance.
+	// However, in pathological cases, it is possible for the number
+	// of instances to accumulate.
+	Tainted []*instanceStateV1 `json:"tainted,omitempty"`
+
+	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+	// Primary is Deposed to get it out of the way for the replacement Primary to
+	// be created by Apply. If the replacement Primary creates successfully, the
+	// Deposed instance is cleaned up. If there were problems creating the
+	// replacement, the instance remains in the Deposed list so it can be
+	// destroyed in a future run. Functionally, Deposed instances are very
+	// similar to Tainted instances in that Terraform is only tracking them in
+	// order to remember to destroy them.
+	Deposed []*instanceStateV1 `json:"deposed,omitempty"`
+
+	// Provider is used when a resource is connected to a provider with an alias.
+	// If this string is empty, the resource is connected to the default provider,
+	// e.g. "aws_instance" goes with the "aws" provider.
+	// If the resource block contained a "provider" key, that value will be set here.
+	Provider string `json:"provider,omitempty"`
+}
+
+type instanceStateV1 struct {
+	// A unique ID for this resource. This is opaque to Terraform
+	// and is only meant as a lookup mechanism for the providers.
+	ID string `json:"id"`
+
+	// Attributes are basic information about the resource. Any keys here
+	// are accessible in variable format within Terraform configurations:
+	// ${resourcetype.name.attribute}.
+	Attributes map[string]string `json:"attributes,omitempty"`
+
+	// Ephemeral is used to store any state associated with this instance
+	// that is necessary for the Terraform run to complete, but is not
+	// persisted to a state file.
+	Ephemeral ephemeralStateV1 `json:"-"`
+
+	// Meta is a simple K/V map that is persisted to the State but otherwise
+	// ignored by Terraform core. It's meant to be used for accounting by
+	// external client code.
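+	//
+	// (Editor's note: the V1->V2 upgrade widens this map to
+	// map[string]interface{}; see instanceStateV1.upgradeToV2.)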
+	Meta map[string]string `json:"meta,omitempty"`
+}
+
+type ephemeralStateV1 struct {
+	// ConnInfo is used for the providers to export information which is
+	// used to connect to the resource for provisioning. For example,
+	// this could contain SSH or WinRM credentials.
+	ConnInfo map[string]string `json:"-"`
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go
new file mode 100644
index 00000000..f4a431a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go
@@ -0,0 +1,52 @@
+package terraform
+
+import (
+	"github.com/hashicorp/terraform/dag"
+)
+
+// GraphTransformer is the interface that transformers implement. This
+// interface is only for transforms that need entire graph visibility.
+type GraphTransformer interface {
+	Transform(*Graph) error
+}
+
+// GraphVertexTransformer is an interface that transforms a single
+// Vertex within the graph. This is a specialization of GraphTransformer
+// that makes it easy to do vertex replacement.
+//
+// The GraphTransformer that runs through the GraphVertexTransformers is
+// VertexTransformer.
+type GraphVertexTransformer interface {
+	Transform(dag.Vertex) (dag.Vertex, error)
+}
+
+// GraphTransformIf is a helper function that conditionally returns the
+// given GraphTransformer. This is useful for building a sequence of
+// transforms inline without having to split it up into multiple append() calls.
+func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer {
+	if f() {
+		return then
+	}
+
+	return nil
+}
+
+type graphTransformerMulti struct {
+	Transforms []GraphTransformer
+}
+
+func (t *graphTransformerMulti) Transform(g *Graph) error {
+	for _, t := range t.Transforms {
+		if err := t.Transform(g); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// GraphTransformMulti combines multiple graph transformers into a single
+// GraphTransformer that runs all the individual graph transformers.
+func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer {
+	return &graphTransformerMulti{Transforms: ts}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
new file mode 100644
index 00000000..61bce853
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
@@ -0,0 +1,135 @@
+package terraform
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"sync"
+
+	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/config/module"
+	"github.com/hashicorp/terraform/dag"
+)
+
+// ConfigTransformer is a GraphTransformer that adds all the resources
+// from the configuration to the graph.
+//
+// The module used to configure this transformer must be the root module.
+//
+// Only resources are added to the graph. Variables, outputs, and
+// providers must be added via other transforms.
+//
+// Unlike ConfigTransformerOld, this transformer creates a graph with
+// all resources including module resources, rather than creating module
+// nodes that are then "flattened".
+type ConfigTransformer struct {
+	Concrete ConcreteResourceNodeFunc
+
+	// Module is the module to add resources from.
+	Module *module.Tree
+
+	// Unique will only add resources that aren't already present in the graph.
+ Unique bool + + // Mode will only add resources that match the given mode + ModeFilter bool + Mode config.ResourceMode + + l sync.Mutex + uniqueMap map[string]struct{} +} + +func (t *ConfigTransformer) Transform(g *Graph) error { + // Lock since we use some internal state + t.l.Lock() + defer t.l.Unlock() + + // If no module is given, we don't do anything + if t.Module == nil { + return nil + } + + // If the module isn't loaded, that is simply an error + if !t.Module.Loaded() { + return errors.New("module must be loaded for ConfigTransformer") + } + + // Reset the uniqueness map. If we're tracking uniques, then populate + // it with addresses. + t.uniqueMap = make(map[string]struct{}) + defer func() { t.uniqueMap = nil }() + if t.Unique { + for _, v := range g.Vertices() { + if rn, ok := v.(GraphNodeResource); ok { + t.uniqueMap[rn.ResourceAddr().String()] = struct{}{} + } + } + } + + // Start the transformation process + return t.transform(g, t.Module) +} + +func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error { + // If no config, do nothing + if m == nil { + return nil + } + + // Add our resources + if err := t.transformSingle(g, m); err != nil { + return err + } + + // Transform all the children. + for _, c := range m.Children() { + if err := t.transform(g, c); err != nil { + return err + } + } + + return nil +} + +func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { + log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path()) + + // Get the configuration for this module + conf := m.Config() + + // Build the path we're at + path := m.Path() + + // Write all the resources out + for _, r := range conf.Resources { + // Build the resource address + addr, err := parseResourceAddressConfig(r) + if err != nil { + panic(fmt.Sprintf( + "Error parsing config address, this is a bug: %#v", r)) + } + addr.Path = path + + // If this is already in our uniqueness map, don't add it again + if _, ok := t.uniqueMap[addr.String()]; ok { + continue + } + + // Remove non-matching modes + if t.ModeFilter && addr.Mode != t.Mode { + continue + } + + // Build the abstract node and the concrete one + abstract := &NodeAbstractResource{Addr: addr} + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + // Add it to the graph + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go new file mode 100644 index 00000000..2148cef4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go @@ -0,0 +1,168 @@ +package terraform + +import "fmt" + +// DeposedTransformer is a GraphTransformer that adds deposed resources +// to the graph. +type DeposedTransformer struct { + // State is the global state. We'll automatically find the correct + // ModuleState based on the Graph.Path that is being transformed. + State *State + + // View, if non-empty, is the ModuleState.View used around the state + // to find deposed resources. + View string +} + +func (t *DeposedTransformer) Transform(g *Graph) error { + state := t.State.ModuleByPath(g.Path) + if state == nil { + // If there is no state for our module there can't be any deposed + // resources, since they live in the state. 
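+		// (Editor's note: deposed instances are created only by the
+		// create-before-destroy flow, and exist solely in the state.)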
+		return nil
+	}
+
+	// If we have a view, apply it now
+	if t.View != "" {
+		state = state.View(t.View)
+	}
+
+	// Go through all the resources in our state to look for deposed resources
+	for k, rs := range state.Resources {
+		// If we have no deposed resources, then move on
+		if len(rs.Deposed) == 0 {
+			continue
+		}
+		deposed := rs.Deposed
+
+		for i := range deposed {
+			g.Add(&graphNodeDeposedResource{
+				Index:        i,
+				ResourceName: k,
+				ResourceType: rs.Type,
+				Provider:     rs.Provider,
+			})
+		}
+	}
+
+	return nil
+}
+
+// graphNodeDeposedResource is the graph vertex representing a deposed resource.
+type graphNodeDeposedResource struct {
+	Index        int
+	ResourceName string
+	ResourceType string
+	Provider     string
+}
+
+func (n *graphNodeDeposedResource) Name() string {
+	return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index)
+}
+
+func (n *graphNodeDeposedResource) ProvidedBy() []string {
+	return []string{resourceProvider(n.ResourceName, n.Provider)}
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeDeposedResource) EvalTree() EvalNode {
+	var provider ResourceProvider
+	var state *InstanceState
+
+	seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)}
+
+	// Build instance info
+	info := &InstanceInfo{Id: n.Name(), Type: n.ResourceType}
+	seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info})
+
+	// Refresh the resource
+	seq.Nodes = append(seq.Nodes, &EvalOpFilter{
+		Ops: []walkOperation{walkRefresh},
+		Node: &EvalSequence{
+			Nodes: []EvalNode{
+				&EvalGetProvider{
+					Name:   n.ProvidedBy()[0],
+					Output: &provider,
+				},
+				&EvalReadStateDeposed{
+					Name:   n.ResourceName,
+					Output: &state,
+					Index:  n.Index,
+				},
+				&EvalRefresh{
+					Info:     info,
+					Provider: &provider,
+					State:    &state,
+					Output:   &state,
+				},
+				&EvalWriteStateDeposed{
+					Name:         n.ResourceName,
+					ResourceType: n.ResourceType,
+					Provider:     n.Provider,
+					State:        &state,
+					Index:        n.Index,
+				},
+			},
+		},
+	})
+
+	// Apply
+	var diff *InstanceDiff
+	var err error
+	seq.Nodes = append(seq.Nodes, &EvalOpFilter{
+		Ops: []walkOperation{walkApply, walkDestroy},
+		Node: &EvalSequence{
+			Nodes: []EvalNode{
+				&EvalGetProvider{
+					Name:   n.ProvidedBy()[0],
+					Output: &provider,
+				},
+				&EvalReadStateDeposed{
+					Name:   n.ResourceName,
+					Output: &state,
+					Index:  n.Index,
+				},
+				&EvalDiffDestroy{
+					Info:   info,
+					State:  &state,
+					Output: &diff,
+				},
+				// Call pre-apply hook
+				&EvalApplyPre{
+					Info:  info,
+					State: &state,
+					Diff:  &diff,
+				},
+				&EvalApply{
+					Info:     info,
+					State:    &state,
+					Diff:     &diff,
+					Provider: &provider,
+					Output:   &state,
+					Error:    &err,
+				},
+				// Always write the resource back to the state deposed... if it
+				// was successfully destroyed it will be pruned. If it was not, it will
+				// be caught on the next run.
+				&EvalWriteStateDeposed{
+					Name:         n.ResourceName,
+					ResourceType: n.ResourceType,
+					Provider:     n.Provider,
+					State:        &state,
+					Index:        n.Index,
+				},
+				&EvalApplyPost{
+					Info:  info,
+					State: &state,
+					Error: &err,
+				},
+				&EvalReturnError{
+					Error: &err,
+				},
+				&EvalUpdateStateHook{},
+			},
+		},
+	})
+
+	return seq
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
new file mode 100644
index 00000000..982c098b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
@@ -0,0 +1,48 @@
+package terraform
+
+import (
+	"log"
+
+	"github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeExpandable is an interface that nodes can implement to
+// signal that they can be expanded. Expanded nodes turn into
+// GraphNodeSubgraph nodes within the graph.
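+//
+// A minimal sketch of how a graph walk might use this (editor's note;
+// the variable names are illustrative, not from the vendored source):
+//
+//	if ev, ok := v.(GraphNodeExpandable); ok {
+//		subgraph, err := ev.Expand(builder)
+//		// ... walk subgraph, handle err
+//	}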
+type GraphNodeExpandable interface {
+	Expand(GraphBuilder) (GraphNodeSubgraph, error)
+}
+
+// GraphNodeDynamicExpandable is an interface that nodes can implement
+// to signal that they can be expanded at eval-time (hence dynamic).
+// These nodes are given the eval context and are expected to return
+// a new subgraph.
+type GraphNodeDynamicExpandable interface {
+	DynamicExpand(EvalContext) (*Graph, error)
+}
+
+// GraphNodeSubgraph is an interface a node can implement if it has
+// a larger subgraph that should be walked.
+type GraphNodeSubgraph interface {
+	Subgraph() dag.Grapher
+}
+
+// ExpandTransform is a transformer that does a subgraph expansion
+// at graph transform time (vs. at eval time). The benefit of earlier
+// subgraph expansion is that errors with the graph build can be detected
+// at an earlier stage.
+type ExpandTransform struct {
+	Builder GraphBuilder
+}
+
+func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) {
+	ev, ok := v.(GraphNodeExpandable)
+	if !ok {
+		// This isn't an expandable vertex, so just ignore it.
+		return v, nil
+	}
+
+	// Expand the subgraph!
+	log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev))
+	return ev.Expand(t.Builder)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
new file mode 100644
index 00000000..081df2f8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
@@ -0,0 +1,241 @@
+package terraform
+
+import (
+	"fmt"
+)
+
+// ImportStateTransformer is a GraphTransformer that adds nodes to the
+// graph to represent the imports we want to do for resources.
+type ImportStateTransformer struct {
+	Targets []*ImportTarget
+}
+
+func (t *ImportStateTransformer) Transform(g *Graph) error {
+	nodes := make([]*graphNodeImportState, 0, len(t.Targets))
+	for _, target := range t.Targets {
+		addr, err := ParseResourceAddress(target.Addr)
+		if err != nil {
+			return fmt.Errorf(
+				"failed to parse resource address '%s': %s",
+				target.Addr, err)
+		}
+
+		nodes = append(nodes, &graphNodeImportState{
+			Addr:     addr,
+			ID:       target.ID,
+			Provider: target.Provider,
+		})
+	}
+
+	// Build the graph vertices
+	for _, n := range nodes {
+		g.Add(n)
+	}
+
+	return nil
+}
+
+type graphNodeImportState struct {
+	Addr     *ResourceAddress // Addr is the resource address to import to
+	ID       string           // ID is the ID to import as
+	Provider string           // Provider is the provider name to use for the import
+
+	states []*InstanceState
+}
+
+func (n *graphNodeImportState) Name() string {
+	return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID)
+}
+
+func (n *graphNodeImportState) ProvidedBy() []string {
+	return []string{resourceProvider(n.Addr.Type, n.Provider)}
+}
+
+// GraphNodeSubPath
+func (n *graphNodeImportState) Path() []string {
+	return normalizeModulePath(n.Addr.Path)
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeImportState) EvalTree() EvalNode {
+	var provider ResourceProvider
+	info := &InstanceInfo{
+		Id:         fmt.Sprintf("%s.%s", n.Addr.Type, n.Addr.Name),
+		ModulePath: n.Path(),
+		Type:       n.Addr.Type,
+	}
+
+	// Reset our states
+	n.states = nil
+
+	// Return our sequence
+	return &EvalSequence{
+		Nodes: []EvalNode{
+			&EvalGetProvider{
+				Name:   n.ProvidedBy()[0],
+				Output: &provider,
+			},
+			&EvalImportState{
+				Provider: &provider,
+				Info:     info,
+				Id:       n.ID,
+				Output:   &n.states,
+			},
+		},
+	}
+}
+
+// GraphNodeDynamicExpandable impl.
+// +// We use DynamicExpand as a way to generate the subgraph of refreshes +// and state inserts we need to do for our import state. Since they're new +// resources they don't depend on anything else and refreshes are isolated +// so this is nearly a perfect use case for dynamic expand. +func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { + g := &Graph{Path: ctx.Path()} + + // nameCounter is used to de-dup names in the state. + nameCounter := make(map[string]int) + + // Compile the list of addresses that we'll be inserting into the state. + // We do this ahead of time so we can verify that we aren't importing + // something that already exists. + addrs := make([]*ResourceAddress, len(n.states)) + for i, state := range n.states { + addr := *n.Addr + if t := state.Ephemeral.Type; t != "" { + addr.Type = t + } + + // Determine if we need to suffix the name to de-dup + key := addr.String() + count, ok := nameCounter[key] + if ok { + count++ + addr.Name += fmt.Sprintf("-%d", count) + } + nameCounter[key] = count + + // Add it to our list + addrs[i] = &addr + } + + // Verify that all the addresses are clear + state, lock := ctx.State() + lock.RLock() + defer lock.RUnlock() + filter := &StateFilter{State: state} + for _, addr := range addrs { + result, err := filter.Filter(addr.String()) + if err != nil { + return nil, fmt.Errorf("Error verifying address %s: %s", addr, err) + } + + // Go through the filter results and it is an error if we find + // a matching InstanceState, meaning that we would have a collision. + for _, r := range result { + if _, ok := r.Value.(*InstanceState); ok { + return nil, fmt.Errorf( + "Can't import %s, would collide with an existing resource.\n\n"+ + "Please remove or rename this resource before continuing.", + addr) + } + } + } + + // For each of the states, we add a node to handle the refresh/add to state. + // "n.states" is populated by our own EvalTree with the result of + // ImportState. Since DynamicExpand is always called after EvalTree, this + // is safe. + for i, state := range n.states { + g.Add(&graphNodeImportStateSub{ + Target: addrs[i], + Path_: n.Path(), + State: state, + Provider: n.Provider, + }) + } + + // Root transform for a single root + t := &RootTransformer{} + if err := t.Transform(g); err != nil { + return nil, err + } + + // Done! + return g, nil +} + +// graphNodeImportStateSub is the sub-node of graphNodeImportState +// and is part of the subgraph. This node is responsible for refreshing +// and adding a resource to the state once it is imported. +type graphNodeImportStateSub struct { + Target *ResourceAddress + State *InstanceState + Path_ []string + Provider string +} + +func (n *graphNodeImportStateSub) Name() string { + return fmt.Sprintf("import %s result: %s", n.Target, n.State.ID) +} + +func (n *graphNodeImportStateSub) Path() []string { + return n.Path_ +} + +// GraphNodeEvalable impl. 
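+//
+// (Editor's note: the sequence below refreshes the imported instance,
+// verifies it still exists remotely, then writes it under its new
+// state key.)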
+func (n *graphNodeImportStateSub) EvalTree() EvalNode {
+	// If the Ephemeral type isn't set, then it is an error
+	if n.State.Ephemeral.Type == "" {
+		err := fmt.Errorf(
+			"import of %s didn't set type for %s",
+			n.Target.String(), n.State.ID)
+		return &EvalReturnError{Error: &err}
+	}
+
+	// DeepCopy so we're only modifying our local copy
+	state := n.State.DeepCopy()
+
+	// Build the resource info
+	info := &InstanceInfo{
+		Id:         fmt.Sprintf("%s.%s", n.Target.Type, n.Target.Name),
+		ModulePath: n.Path_,
+		Type:       n.State.Ephemeral.Type,
+	}
+
+	// Key is the resource key
+	key := &ResourceStateKey{
+		Name:  n.Target.Name,
+		Type:  info.Type,
+		Index: n.Target.Index,
+	}
+
+	// The eval sequence
+	var provider ResourceProvider
+	return &EvalSequence{
+		Nodes: []EvalNode{
+			&EvalGetProvider{
+				Name:   resourceProvider(info.Type, n.Provider),
+				Output: &provider,
+			},
+			&EvalRefresh{
+				Provider: &provider,
+				State:    &state,
+				Info:     info,
+				Output:   &state,
+			},
+			&EvalImportStateVerify{
+				Info:  info,
+				Id:    n.State.ID,
+				State: &state,
+			},
+			&EvalWriteState{
+				Name:         key.String(),
+				ResourceType: info.Type,
+				Provider:     resourceProvider(info.Type, n.Provider),
+				State:        &state,
+			},
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
new file mode 100644
index 00000000..b260f4ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
@@ -0,0 +1,59 @@
+package terraform
+
+import (
+	"github.com/hashicorp/terraform/config/module"
+)
+
+// OutputTransformer is a GraphTransformer that adds all the outputs
+// in the configuration to the graph.
+//
+// This is done for the apply graph builder even if the dependent nodes
+// aren't changing, since there is no downside: the output state will
+// simply be kept up to date.
+type OutputTransformer struct {
+	Module *module.Tree
+}
+
+func (t *OutputTransformer) Transform(g *Graph) error {
+	return t.transform(g, t.Module)
+}
+
+func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error {
+	// If no config, no outputs
+	if m == nil {
+		return nil
+	}
+
+	// Transform all the children. We must do this first because
+	// we can reference module outputs and they must show up in the
+	// reference map.
+	for _, c := range m.Children() {
+		if err := t.transform(g, c); err != nil {
+			return err
+		}
+	}
+
+	// If we have no outputs, we're done!
+	os := m.Config().Outputs
+	if len(os) == 0 {
+		return nil
+	}
+
+	// Add all outputs here
+	for _, o := range os {
+		// Build the node.
+		//
+		// NOTE: For now this is just an "applyable" output. As we build
+		// new graph builders for the other operations I suspect we'll
+		// find a way to parameterize this, require new transforms, etc.
+		node := &NodeApplyableOutput{
+			PathValue: normalizeModulePath(m.Path()),
+			Config:    o,
+		}
+
+		// Add it!
+		g.Add(node)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
new file mode 100644
index 00000000..b9695d52
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
@@ -0,0 +1,380 @@
+package terraform
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeProvider is an interface that nodes that can be a provider
+// must implement. The ProviderName returned is the name of the provider
+// they satisfy.
+type GraphNodeProvider interface {
+	ProviderName() string
+}
+
+// GraphNodeCloseProvider is an interface that nodes that can be a close
+// provider must implement. The CloseProviderName returned is the name of
+// the provider they satisfy.
+type GraphNodeCloseProvider interface {
+	CloseProviderName() string
+}
+
+// GraphNodeProviderConsumer is an interface that nodes that require
+// a provider must implement. ProvidedBy must return the name of the provider
+// to use.
+type GraphNodeProviderConsumer interface {
+	ProvidedBy() []string
+}
+
+// ProviderTransformer is a GraphTransformer that maps resources to
+// providers within the graph. This will error if there are any resources
+// that don't map to a proper provider.
+type ProviderTransformer struct{}
+
+func (t *ProviderTransformer) Transform(g *Graph) error {
+	// Go through the other nodes and match them to providers they need
+	var err error
+	m := providerVertexMap(g)
+	for _, v := range g.Vertices() {
+		if pv, ok := v.(GraphNodeProviderConsumer); ok {
+			for _, p := range pv.ProvidedBy() {
+				target := m[providerMapKey(p, pv)]
+				if target == nil {
+					err = multierror.Append(err, fmt.Errorf(
+						"%s: provider %s couldn't be found",
+						dag.VertexName(v), p))
+					continue
+				}
+
+				g.Connect(dag.BasicEdge(v, target))
+			}
+		}
+	}
+
+	return err
+}
+
+// CloseProviderTransformer is a GraphTransformer that adds nodes to the
+// graph that will close open provider connections that aren't needed anymore.
+// A provider connection is not needed anymore once all dependent resources
+// in the graph are evaluated.
+type CloseProviderTransformer struct{}
+
+func (t *CloseProviderTransformer) Transform(g *Graph) error {
+	pm := providerVertexMap(g)
+	cpm := closeProviderVertexMap(g)
+	var err error
+	for _, v := range g.Vertices() {
+		if pv, ok := v.(GraphNodeProviderConsumer); ok {
+			for _, p := range pv.ProvidedBy() {
+				key := p
+				source := cpm[key]
+
+				if source == nil {
+					// Create a new graphNodeCloseProvider and add it to the graph
+					source = &graphNodeCloseProvider{ProviderNameValue: p}
+					g.Add(source)
+
+					// Close node needs to depend on provider
+					provider, ok := pm[key]
+					if !ok {
+						err = multierror.Append(err, fmt.Errorf(
+							"%s: provider %s couldn't be found for closing",
+							dag.VertexName(v), p))
+						continue
+					}
+					g.Connect(dag.BasicEdge(source, provider))
+
+					// Make sure we also add the new graphNodeCloseProvider to the map
+					// so we don't create and add any duplicate graphNodeCloseProviders.
+					cpm[key] = source
+				}
+
+				// Close node depends on all nodes provided by the provider
+				g.Connect(dag.BasicEdge(source, v))
+			}
+		}
+	}
+
+	return err
+}
+
+// MissingProviderTransformer is a GraphTransformer that adds nodes
+// for missing providers into the graph. Specifically, it creates provider
+// configuration nodes for all the providers that we support. These are
+// pruned later during an optimization pass.
+type MissingProviderTransformer struct {
+	// Providers is the list of providers we support.
+	Providers []string
+
+	// AllowAny will not check that a provider is supported before adding
+	// it to the graph.
+	AllowAny bool
+
+	// Concrete, if set, overrides how the providers are made.
+	Concrete ConcreteProviderNodeFunc
+}
+
+func (t *MissingProviderTransformer) Transform(g *Graph) error {
+	// Initialize factory
+	if t.Concrete == nil {
+		t.Concrete = func(a *NodeAbstractProvider) dag.Vertex {
+			return a
+		}
+	}
+
+	// Create a set of our supported providers
+	supported := make(map[string]struct{}, len(t.Providers))
+	for _, v := range t.Providers {
+		supported[v] = struct{}{}
+	}
+
+	// Get the map of providers we already have in our graph
+	m := providerVertexMap(g)
+
+	// Go through all the provider consumers and make sure we add
+	// that provider if it is missing. We use a for loop here instead
+	// of "range" since we append to check as we go.
+	check := g.Vertices()
+	for i := 0; i < len(check); i++ {
+		v := check[i]
+
+		pv, ok := v.(GraphNodeProviderConsumer)
+		if !ok {
+			continue
+		}
+
+		// If this node has a subpath, then we use that as a prefix
+		// into our map to check for an existing provider.
+		var path []string
+		if sp, ok := pv.(GraphNodeSubPath); ok {
+			raw := normalizeModulePath(sp.Path())
+			if len(raw) > len(rootModulePath) {
+				path = raw
+			}
+		}
+
+		for _, p := range pv.ProvidedBy() {
+			key := providerMapKey(p, pv)
+			if _, ok := m[key]; ok {
+				// This provider already exists as a configure node
+				continue
+			}
+
+			// If the provider has an alias in it, we just want the type
+			ptype := p
+			if idx := strings.IndexRune(p, '.'); idx != -1 {
+				ptype = p[:idx]
+			}
+
+			if !t.AllowAny {
+				if _, ok := supported[ptype]; !ok {
+					// If we don't support the provider type, skip it.
+					// Validation later will catch this as an error.
+					continue
+				}
+			}
+
+			// Add the missing provider node to the graph
+			v := t.Concrete(&NodeAbstractProvider{
+				NameValue: p,
+				PathValue: path,
+			}).(dag.Vertex)
+			if len(path) > 0 {
+				// We'll need the parent provider as well, so let's
+				// add a dummy node to check to make sure that we add
+				// that parent provider.
+				check = append(check, &graphNodeProviderConsumerDummy{
+					ProviderValue: p,
+					PathValue:     path[:len(path)-1],
+				})
+			}
+
+			m[key] = g.Add(v)
+		}
+	}
+
+	return nil
+}
+
+// ParentProviderTransformer connects provider nodes to their parents.
+//
+// This works by finding nodes that are both GraphNodeProviders and
+// GraphNodeSubPath. It then connects the providers to their parent
+// path.
+type ParentProviderTransformer struct{}
+
+func (t *ParentProviderTransformer) Transform(g *Graph) error {
+	// Make a mapping of path to dag.Vertex, where path is: "path.name"
+	m := make(map[string]dag.Vertex)
+
+	// Also create a map that maps a provider to its parent
+	parentMap := make(map[dag.Vertex]string)
+	for _, raw := range g.Vertices() {
+		// If it is the flat version, then make it the non-flat version.
+		// We eventually want to get rid of the flat version entirely so
+		// this is a stop-gap while it still exists.
+		var v dag.Vertex = raw
+
+		// Only care about providers
+		pn, ok := v.(GraphNodeProvider)
+		if !ok || pn.ProviderName() == "" {
+			continue
+		}
+
+		// Also require a subpath; if there is no subpath then we
+		// just totally ignore it. The expectation of this transform is
+		// that it is used with a graph builder that is already flattened.
+		var path []string
+		if pn, ok := raw.(GraphNodeSubPath); ok {
+			path = pn.Path()
+		}
+		path = normalizeModulePath(path)
+
+		// Build the key with path.name i.e. "child.subchild.aws"
+		key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
+		m[key] = raw
+
+		// Determine the parent if we're non-root. Non-root paths have
+		// length greater than 1, since index 0 is always "root" after
+		// the normalization above.
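+		// Worked example (editor's note, assuming a provider named
+		// "aws"): a path of ["root", "child", "subchild"] yields the key
+		// "root.child.subchild.aws" and the parent key "root.child.aws".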
+		if len(path) > 1 {
+			path = path[:len(path)-1]
+			key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
+			parentMap[raw] = key
+		}
+	}
+
+	// Connect!
+	for v, key := range parentMap {
+		if parent, ok := m[key]; ok {
+			g.Connect(dag.BasicEdge(v, parent))
+		}
+	}
+
+	return nil
+}
+
+// PruneProviderTransformer is a GraphTransformer that prunes all the
+// providers that aren't needed from the graph. A provider is unneeded if
+// no resource or module is using that provider.
+type PruneProviderTransformer struct{}
+
+func (t *PruneProviderTransformer) Transform(g *Graph) error {
+	for _, v := range g.Vertices() {
+		// We only care about the providers
+		if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" {
+			continue
+		}
+		// Does anything depend on this? If not, then prune it.
+		if s := g.UpEdges(v); s.Len() == 0 {
+			if nv, ok := v.(dag.NamedVertex); ok {
+				log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name())
+			}
+			g.Remove(v)
+		}
+	}
+
+	return nil
+}
+
+// providerMapKey is a helper that gives us the key to use for the
+// maps returned by things such as providerVertexMap.
+func providerMapKey(k string, v dag.Vertex) string {
+	pathPrefix := ""
+	if sp, ok := v.(GraphNodeSubPath); ok {
+		raw := normalizeModulePath(sp.Path())
+		if len(raw) > len(rootModulePath) {
+			pathPrefix = modulePrefixStr(raw) + "."
+		}
+	}
+
+	return pathPrefix + k
+}
+
+func providerVertexMap(g *Graph) map[string]dag.Vertex {
+	m := make(map[string]dag.Vertex)
+	for _, v := range g.Vertices() {
+		if pv, ok := v.(GraphNodeProvider); ok {
+			key := providerMapKey(pv.ProviderName(), v)
+			m[key] = v
+		}
+	}
+
+	return m
+}
+
+func closeProviderVertexMap(g *Graph) map[string]dag.Vertex {
+	m := make(map[string]dag.Vertex)
+	for _, v := range g.Vertices() {
+		if pv, ok := v.(GraphNodeCloseProvider); ok {
+			m[pv.CloseProviderName()] = v
+		}
+	}
+
+	return m
+}
+
+type graphNodeCloseProvider struct {
+	ProviderNameValue string
+}
+
+func (n *graphNodeCloseProvider) Name() string {
+	return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue)
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeCloseProvider) EvalTree() EvalNode {
+	return CloseProviderEvalTree(n.ProviderNameValue)
+}
+
+// GraphNodeDependable impl.
+func (n *graphNodeCloseProvider) DependableName() []string {
+	return []string{n.Name()}
+}
+
+func (n *graphNodeCloseProvider) CloseProviderName() string {
+	return n.ProviderNameValue
+}
+
+// GraphNodeDotter impl.
+func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+	if !opts.Verbose {
+		return nil
+	}
+	return &dag.DotNode{
+		Name: name,
+		Attrs: map[string]string{
+			"label": n.Name(),
+			"shape": "diamond",
+		},
+	}
+}
+
+// RemovableIfNotTargeted impl.
+func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool {
+	// We need to add this so that this node will be removed if
+	// it isn't targeted or a dependency of a target.
+	return true
+}
+
+// graphNodeProviderConsumerDummy is a struct that never enters the real
+// graph (though it could, to no ill effect). It implements
+// GraphNodeProviderConsumer and GraphNodeSubPath as a way to force
+// certain transformations.
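+//
+// (Editor's note: MissingProviderTransformer appends one of these per
+// parent path it discovers, forcing the parent module's provider to be
+// added as well.)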
+type graphNodeProviderConsumerDummy struct {
+	ProviderValue string
+	PathValue     []string
+}
+
+func (n *graphNodeProviderConsumerDummy) Path() []string {
+	return n.PathValue
+}
+
+func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string {
+	return []string{n.ProviderValue}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
new file mode 100644
index 00000000..f49d8241
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
@@ -0,0 +1,206 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeProvisioner is an interface that nodes that can be a provisioner
+// must implement. The ProvisionerName returned is the name of the provisioner
+// they satisfy.
+type GraphNodeProvisioner interface {
+	ProvisionerName() string
+}
+
+// GraphNodeCloseProvisioner is an interface that nodes that can be a close
+// provisioner must implement. The CloseProvisionerName returned is the name
+// of the provisioner they satisfy.
+type GraphNodeCloseProvisioner interface {
+	CloseProvisionerName() string
+}
+
+// GraphNodeProvisionerConsumer is an interface that nodes that require
+// a provisioner must implement. ProvisionedBy must return the name of the
+// provisioner to use.
+type GraphNodeProvisionerConsumer interface {
+	ProvisionedBy() []string
+}
+
+// ProvisionerTransformer is a GraphTransformer that maps resources to
+// provisioners within the graph. This will error if there are any resources
+// that don't map to proper provisioners.
+type ProvisionerTransformer struct{}
+
+func (t *ProvisionerTransformer) Transform(g *Graph) error {
+	// Go through the other nodes and match them to provisioners they need
+	var err error
+	m := provisionerVertexMap(g)
+	for _, v := range g.Vertices() {
+		if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
+			for _, p := range pv.ProvisionedBy() {
+				key := provisionerMapKey(p, pv)
+				if m[key] == nil {
+					err = multierror.Append(err, fmt.Errorf(
+						"%s: provisioner %s couldn't be found",
+						dag.VertexName(v), p))
+					continue
+				}
+
+				g.Connect(dag.BasicEdge(v, m[key]))
+			}
+		}
+	}
+
+	return err
+}
+
+// MissingProvisionerTransformer is a GraphTransformer that adds nodes
+// for missing provisioners into the graph.
+type MissingProvisionerTransformer struct {
+	// Provisioners is the list of provisioners we support.
+	Provisioners []string
+}
+
+func (t *MissingProvisionerTransformer) Transform(g *Graph) error {
+	// Create a set of our supported provisioners
+	supported := make(map[string]struct{}, len(t.Provisioners))
+	for _, v := range t.Provisioners {
+		supported[v] = struct{}{}
+	}
+
+	// Get the map of provisioners we already have in our graph
+	m := provisionerVertexMap(g)
+
+	// Go through all the provisioner consumers and make sure we add
+	// that provisioner if it is missing.
+	for _, v := range g.Vertices() {
+		pv, ok := v.(GraphNodeProvisionerConsumer)
+		if !ok {
+			continue
+		}
+
+		// If this node has a subpath, then we use that as a prefix
+		// into our map to check for an existing provisioner.
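ProvisionerTransformer earlier in this file collects every unresolved provisioner into a single error with go-multierror rather than failing on the first miss. A small sketch of that accumulation pattern, with invented provisioner names (multierror.Append is the real library call; everything else here is illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	// Accumulate every failure instead of returning on the first one,
	// the same pattern ProvisionerTransformer.Transform uses above.
	var err error
	known := map[string]bool{"local-exec": true}
	for _, want := range []string{"local-exec", "chef", "salt"} {
		if !known[want] {
			err = multierror.Append(err, fmt.Errorf(
				"provisioner %s couldn't be found", want))
		}
	}
	fmt.Println(err) // reports both "chef" and "salt" in one error
}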
+ var path []string + if sp, ok := pv.(GraphNodeSubPath); ok { + raw := normalizeModulePath(sp.Path()) + if len(raw) > len(rootModulePath) { + path = raw + } + } + + for _, p := range pv.ProvisionedBy() { + // Build the key for storing in the map + key := provisionerMapKey(p, pv) + + if _, ok := m[key]; ok { + // This provisioner already exists as a configure node + continue + } + + if _, ok := supported[p]; !ok { + // If we don't support the provisioner type, skip it. + // Validation later will catch this as an error. + continue + } + + // Build the vertex + var newV dag.Vertex = &NodeProvisioner{ + NameValue: p, + PathValue: path, + } + + // Add the missing provisioner node to the graph + m[key] = g.Add(newV) + } + } + + return nil +} + +// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the +// graph that will close open provisioner connections that aren't needed +// anymore. A provisioner connection is not needed anymore once all depended +// resources in the graph are evaluated. +type CloseProvisionerTransformer struct{} + +func (t *CloseProvisionerTransformer) Transform(g *Graph) error { + m := closeProvisionerVertexMap(g) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProvisionerConsumer); ok { + for _, p := range pv.ProvisionedBy() { + source := m[p] + + if source == nil { + // Create a new graphNodeCloseProvisioner and add it to the graph + source = &graphNodeCloseProvisioner{ProvisionerNameValue: p} + g.Add(source) + + // Make sure we also add the new graphNodeCloseProvisioner to the map + // so we don't create and add any duplicate graphNodeCloseProvisioners. + m[p] = source + } + + g.Connect(dag.BasicEdge(source, v)) + } + } + } + + return nil +} + +// provisionerMapKey is a helper that gives us the key to use for the +// maps returned by things such as provisionerVertexMap. +func provisionerMapKey(k string, v dag.Vertex) string { + pathPrefix := "" + if sp, ok := v.(GraphNodeSubPath); ok { + raw := normalizeModulePath(sp.Path()) + if len(raw) > len(rootModulePath) { + pathPrefix = modulePrefixStr(raw) + "." + } + } + + return pathPrefix + k +} + +func provisionerVertexMap(g *Graph) map[string]dag.Vertex { + m := make(map[string]dag.Vertex) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProvisioner); ok { + key := provisionerMapKey(pv.ProvisionerName(), v) + m[key] = v + } + } + + return m +} + +func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex { + m := make(map[string]dag.Vertex) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeCloseProvisioner); ok { + m[pv.CloseProvisionerName()] = v + } + } + + return m +} + +type graphNodeCloseProvisioner struct { + ProvisionerNameValue string +} + +func (n *graphNodeCloseProvisioner) Name() string { + return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue) +} + +// GraphNodeEvalable impl. 
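The close-provisioner logic above creates at most one close node per provisioner name by memoizing newly created nodes in the vertex map. A toy get-or-create sketch of the same idea (closeNode and getOrCreate are stand-ins, not names from the vendored code):

package main

import "fmt"

type closeNode struct{ name string }

// getOrCreate memoizes one value per key, the same get-or-create
// pattern CloseProvisionerTransformer uses so each provisioner gets
// exactly one close node no matter how many consumers it has.
func getOrCreate(m map[string]*closeNode, name string) *closeNode {
	if n, ok := m[name]; ok {
		return n
	}
	n := &closeNode{name: name}
	m[name] = n
	return n
}

func main() {
	m := map[string]*closeNode{}
	a := getOrCreate(m, "local-exec")
	b := getOrCreate(m, "local-exec")
	fmt.Println(a == b, len(m)) // true 1
}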
+func (n *graphNodeCloseProvisioner) EvalTree() EvalNode {
+	return &EvalCloseProvisioner{Name: n.ProvisionerNameValue}
+}
+
+func (n *graphNodeCloseProvisioner) CloseProvisionerName() string {
+	return n.ProvisionerNameValue
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
new file mode 100644
index 00000000..aee053d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
@@ -0,0 +1,38 @@
+package terraform
+
+import "github.com/hashicorp/terraform/dag"
+
+const rootNodeName = "root"
+
+// RootTransformer is a GraphTransformer that adds a root to the graph.
+type RootTransformer struct{}
+
+func (t *RootTransformer) Transform(g *Graph) error {
+	// If we already have a good root, we're done
+	if _, err := g.Root(); err == nil {
+		return nil
+	}
+
+	// Add a root
+	var root graphNodeRoot
+	g.Add(root)
+
+	// Connect the root to all the edges that need it
+	for _, v := range g.Vertices() {
+		if v == root {
+			continue
+		}
+
+		if g.UpEdges(v).Len() == 0 {
+			g.Connect(dag.BasicEdge(root, v))
+		}
+	}
+
+	return nil
+}
+
+type graphNodeRoot struct{}
+
+func (n graphNodeRoot) Name() string {
+	return rootNodeName
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
new file mode 100644
index 00000000..125f9e30
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
@@ -0,0 +1,219 @@
+package terraform
+
+import (
+	"log"
+
+	"github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeTargetable is an interface for graph nodes to implement when they
+// need to be told about incoming targets. This is useful for nodes that need
+// to respect targets as they dynamically expand. Note that the list of targets
+// provided will contain every target provided, and each implementing graph
+// node must filter this list to targets considered relevant.
+type GraphNodeTargetable interface {
+	SetTargets([]ResourceAddress)
+}
+
+// GraphNodeTargetDownstream is an interface for graph nodes that need to
+// remain present under targeting if any of their dependencies are targeted.
+// TargetDownstream is called with the set of vertices that are direct
+// dependencies for the node, and it should return true if the node must remain
+// in the graph in support of those dependencies.
+//
+// This is used in situations where the dependency edges are representing an
+// ordering relationship, but the dependency must still be visited if its
+// dependencies are visited. This is true for outputs, for example, since
+// they must get updated if any of their dependent resources get updated,
+// which would not normally be true if one of their dependencies were targeted.
+type GraphNodeTargetDownstream interface {
+	TargetDownstream(targeted, untargeted *dag.Set) bool
+}
+
+// TargetsTransformer is a GraphTransformer that, when the user specifies a
+// list of resources to target, limits the graph to only those resources and
+// their dependencies.
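RootTransformer above wires every vertex with no dependers under a single synthetic root, so the graph has exactly one entry point. A toy illustration over a plain in-degree map (vertex names invented):

package main

import "fmt"

func main() {
	// Any vertex with in-degree zero would get an edge from the
	// synthetic root, mirroring the UpEdges check above.
	inDegree := map[string]int{"a": 0, "b": 1, "c": 0}
	rootEdges := 0
	for _, deg := range inDegree {
		if deg == 0 {
			rootEdges++
		}
	}
	fmt.Printf("root -> %d vertices with no dependers\n", rootEdges) // 2
}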
+type TargetsTransformer struct {
+	// List of targeted resource names specified by the user
+	Targets []string
+
+	// List of parsed targets, provided by callers like ResourceCountTransform
+	// that already have the targets parsed
+	ParsedTargets []ResourceAddress
+
+	// Set to true when we're in a `terraform destroy` or a
+	// `terraform plan -destroy`
+	Destroy bool
+}
+
+func (t *TargetsTransformer) Transform(g *Graph) error {
+	if len(t.Targets) > 0 && len(t.ParsedTargets) == 0 {
+		addrs, err := t.parseTargetAddresses()
+		if err != nil {
+			return err
+		}
+
+		t.ParsedTargets = addrs
+	}
+
+	if len(t.ParsedTargets) > 0 {
+		targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets)
+		if err != nil {
+			return err
+		}
+
+		for _, v := range g.Vertices() {
+			removable := false
+			if _, ok := v.(GraphNodeResource); ok {
+				removable = true
+			}
+			if vr, ok := v.(RemovableIfNotTargeted); ok {
+				removable = vr.RemoveIfNotTargeted()
+			}
+			if removable && !targetedNodes.Include(v) {
+				log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
+				g.Remove(v)
+			}
+		}
+	}
+
+	return nil
+}
+
+func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) {
+	addrs := make([]ResourceAddress, len(t.Targets))
+	for i, target := range t.Targets {
+		ta, err := ParseResourceAddress(target)
+		if err != nil {
+			return nil, err
+		}
+		addrs[i] = *ta
+	}
+
+	return addrs, nil
+}
+
+// Returns the list of targeted nodes. A targeted node is either addressed
+// directly, or is an Ancestor of a targeted node. Destroy mode keeps
+// Descendents instead of Ancestors.
+func (t *TargetsTransformer) selectTargetedNodes(
+	g *Graph, addrs []ResourceAddress) (*dag.Set, error) {
+	targetedNodes := new(dag.Set)
+
+	vertices := g.Vertices()
+
+	for _, v := range vertices {
+		if t.nodeIsTarget(v, addrs) {
+			targetedNodes.Add(v)
+
+			// We inform nodes that ask about the list of targets - helps for nodes
+			// that need to dynamically expand. Note that this only occurs for nodes
+			// that are already directly targeted.
+			if tn, ok := v.(GraphNodeTargetable); ok {
+				tn.SetTargets(addrs)
+			}
+
+			var deps *dag.Set
+			var err error
+			if t.Destroy {
+				deps, err = g.Descendents(v)
+			} else {
+				deps, err = g.Ancestors(v)
+			}
+			if err != nil {
+				return nil, err
+			}
+
+			for _, d := range deps.List() {
+				targetedNodes.Add(d)
+			}
+		}
+	}
+
+	// Handle nodes that need to be included if their dependencies are included.
+	// This requires multiple passes since we need to catch transitive
+	// dependencies if and only if they are via other nodes that also
+	// support TargetDownstream. For example:
+	// output -> output -> targeted-resource: both outputs need to be targeted
+	// output -> non-targeted-resource -> targeted-resource: output not targeted
+	//
+	// We'll keep looping until we stop targeting more nodes.
+	queue := targetedNodes.List()
+	for len(queue) > 0 {
+		vertices := queue
+		queue = nil // ready to append for next iteration if necessary
+		for _, v := range vertices {
+			dependers := g.UpEdges(v)
+			if dependers == nil {
+				// indicates that there are no up edges for this node, so
+				// we have nothing to do here.
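The TargetDownstream handling here is a standard worklist expansion: keep a queue of newly targeted vertices and re-examine their dependers until a pass adds nothing new. A stripped-down sketch using a plain adjacency map instead of dag.Set (names invented):

package main

import "fmt"

func main() {
	// dependers maps a vertex to the vertices that depend on it.
	dependers := map[string][]string{
		"resource": {"output1"},
		"output1":  {"output2"},
	}
	targeted := map[string]bool{"resource": true}

	queue := []string{"resource"}
	for len(queue) > 0 {
		v := queue[0]
		queue = queue[1:]
		for _, d := range dependers[v] {
			if !targeted[d] {
				targeted[d] = true
				queue = append(queue, d) // revisit for transitive dependers
			}
		}
	}
	fmt.Println(len(targeted)) // 3: resource, output1, output2
}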
+				continue
+			}
+
+			dependers = dependers.Filter(func(dv interface{}) bool {
+				// Nodes that are already targeted are skipped in the
+				// loop below, so only keep vertices that can be pulled
+				// in via TargetDownstream.
+				_, ok := dv.(GraphNodeTargetDownstream)
+				return ok
+			})
+
+			if dependers.Len() == 0 {
+				continue
+			}
+
+			for _, dv := range dependers.List() {
+				if targetedNodes.Include(dv) {
+					// Already present, so nothing to do
+					continue
+				}
+
+				// We'll give the node some information about what it's
+				// depending on in case that informs its decision about whether
+				// it is safe to be targeted.
+				deps := g.DownEdges(v)
+				depsTargeted := deps.Intersection(targetedNodes)
+				depsUntargeted := deps.Difference(depsTargeted)
+
+				if dv.(GraphNodeTargetDownstream).TargetDownstream(depsTargeted, depsUntargeted) {
+					targetedNodes.Add(dv)
+					// Need to visit this node on the next pass to see if it
+					// has any transitive dependers.
+					queue = append(queue, dv)
+				}
+			}
+		}
+	}
+
+	return targetedNodes, nil
+}
+
+func (t *TargetsTransformer) nodeIsTarget(
+	v dag.Vertex, addrs []ResourceAddress) bool {
+	r, ok := v.(GraphNodeResource)
+	if !ok {
+		return false
+	}
+
+	addr := r.ResourceAddr()
+	for _, targetAddr := range addrs {
+		if targetAddr.Equals(addr) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// RemovableIfNotTargeted is a special interface for graph nodes that
+// aren't directly addressable, but need to be removed from the graph when they
+// are not targeted. (Nodes that are not directly targeted end up in the set of
+// targeted nodes because something that _is_ targeted depends on them.) The
+// initial use case for this interface is GraphNodeConfigVariable, which was
+// having trouble interpolating for module variables in targeted scenarios that
+// filtered out the resource node being referenced.
+type RemovableIfNotTargeted interface {
+	RemoveIfNotTargeted() bool
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
new file mode 100644
index 00000000..21842789
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
@@ -0,0 +1,20 @@
+package terraform
+
+// TransitiveReductionTransformer is a GraphTransformer that
+// finds the transitive reduction of the graph. For a definition of
+// transitive reduction, see Wikipedia.
+type TransitiveReductionTransformer struct{}
+
+func (t *TransitiveReductionTransformer) Transform(g *Graph) error {
+	// If the graph isn't valid, skip the transitive reduction.
+	// We don't error here because Terraform itself handles graph
+	// validation in a better way, or we assume it does.
+	if err := g.Validate(); err != nil {
+		return nil
+	}
+
+	// Do it
+	g.TransitiveReduction()
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
new file mode 100644
index 00000000..6b1293fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
@@ -0,0 +1,44 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/dag"
+)
+
+// VertexTransformer is a GraphTransformer that transforms vertices
+// using the GraphVertexTransformers. The Transforms are run in sequential
+// order. If a transform replaces a vertex then the next transform will see
+// the new vertex.
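Transitive reduction, as performed by TransitiveReductionTransformer above, drops edges already implied by longer paths: with a -> b, b -> c, and a -> c, the direct a -> c edge is redundant. A toy sketch that only checks a single intermediate hop; the real dag.TransitiveReduction handles arbitrary path lengths:

package main

import "fmt"

func main() {
	edges := map[string][]string{"a": {"b", "c"}, "b": {"c"}}
	reduced := map[string][]string{}
	for from, tos := range edges {
		for _, to := range tos {
			// An edge is implied if some other successor of "from"
			// also reaches "to" in one step.
			implied := false
			for _, mid := range edges[from] {
				if mid == to {
					continue
				}
				for _, t := range edges[mid] {
					if t == to {
						implied = true
					}
				}
			}
			if !implied {
				reduced[from] = append(reduced[from], to)
			}
		}
	}
	fmt.Println(reduced) // map[a:[b] b:[c]]
}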
+type VertexTransformer struct { + Transforms []GraphVertexTransformer +} + +func (t *VertexTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + for _, vt := range t.Transforms { + newV, err := vt.Transform(v) + if err != nil { + return err + } + + // If the vertex didn't change, then don't do anything more + if newV == v { + continue + } + + // Vertex changed, replace it within the graph + if ok := g.Replace(v, newV); !ok { + // This should never happen, big problem + return fmt.Errorf( + "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v", + dag.VertexName(v), dag.VertexName(newV), v, newV) + } + + // Replace v so that future transforms use the proper vertex + v = newV + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go new file mode 100644 index 00000000..7c874592 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go @@ -0,0 +1,26 @@ +package terraform + +// UIInput is the interface that must be implemented to ask for input +// from this user. This should forward the request to wherever the user +// inputs things to ask for values. +type UIInput interface { + Input(*InputOpts) (string, error) +} + +// InputOpts are options for asking for input. +type InputOpts struct { + // Id is a unique ID for the question being asked that might be + // used for logging or to look up a prior answered question. + Id string + + // Query is a human-friendly question for inputting this value. + Query string + + // Description is a description about what this option is. Be wary + // that this will probably be in a terminal so split lines as you see + // necessary. + Description string + + // Default will be the value returned if no data is entered. + Default string +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go new file mode 100644 index 00000000..e3a07efa --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go @@ -0,0 +1,23 @@ +package terraform + +// MockUIInput is an implementation of UIInput that can be used for tests. +type MockUIInput struct { + InputCalled bool + InputOpts *InputOpts + InputReturnMap map[string]string + InputReturnString string + InputReturnError error + InputFn func(*InputOpts) (string, error) +} + +func (i *MockUIInput) Input(opts *InputOpts) (string, error) { + i.InputCalled = true + i.InputOpts = opts + if i.InputFn != nil { + return i.InputFn(opts) + } + if i.InputReturnMap != nil { + return i.InputReturnMap[opts.Id], i.InputReturnError + } + return i.InputReturnString, i.InputReturnError +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go new file mode 100644 index 00000000..2207d1d0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go @@ -0,0 +1,19 @@ +package terraform + +import ( + "fmt" +) + +// PrefixUIInput is an implementation of UIInput that prefixes the ID +// with a string, allowing queries to be namespaced. 
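UIInput, defined above, is easy to satisfy with a canned lookup for tests, which is essentially what MockUIInput later in this patch does. A self-contained sketch (InputOpts is re-declared in trimmed form so the example compiles on its own):

package main

import "fmt"

type InputOpts struct{ Id, Query string }

type UIInput interface {
	Input(*InputOpts) (string, error)
}

// mapInput answers every question from a fixed map, keyed by the
// question's Id, much like MockUIInput's InputReturnMap.
type mapInput map[string]string

func (m mapInput) Input(opts *InputOpts) (string, error) {
	return m[opts.Id], nil
}

func main() {
	var in UIInput = mapInput{"var.region": "us-west-2"}
	v, _ := in.Input(&InputOpts{Id: "var.region", Query: "Region?"})
	fmt.Println(v) // us-west-2
}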
+type PrefixUIInput struct { + IdPrefix string + QueryPrefix string + UIInput UIInput +} + +func (i *PrefixUIInput) Input(opts *InputOpts) (string, error) { + opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) + opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) + return i.UIInput.Input(opts) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go new file mode 100644 index 00000000..84427c63 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go @@ -0,0 +1,7 @@ +package terraform + +// UIOutput is the interface that must be implemented to output +// data to the end user. +type UIOutput interface { + Output(string) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go new file mode 100644 index 00000000..135a91c5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go @@ -0,0 +1,9 @@ +package terraform + +type CallbackUIOutput struct { + OutputFn func(string) +} + +func (o *CallbackUIOutput) Output(v string) { + o.OutputFn(v) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go new file mode 100644 index 00000000..7852bc42 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go @@ -0,0 +1,16 @@ +package terraform + +// MockUIOutput is an implementation of UIOutput that can be used for tests. +type MockUIOutput struct { + OutputCalled bool + OutputMessage string + OutputFn func(string) +} + +func (o *MockUIOutput) Output(v string) { + o.OutputCalled = true + o.OutputMessage = v + if o.OutputFn != nil { + o.OutputFn(v) + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go new file mode 100644 index 00000000..878a0312 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go @@ -0,0 +1,15 @@ +package terraform + +// ProvisionerUIOutput is an implementation of UIOutput that calls a hook +// for the output so that the hooks can handle it. +type ProvisionerUIOutput struct { + Info *InstanceInfo + Type string + Hooks []Hook +} + +func (o *ProvisionerUIOutput) Output(msg string) { + for _, h := range o.Hooks { + h.ProvisionOutput(o.Info, o.Type, msg) + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go new file mode 100644 index 00000000..f41f0d7d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/util.go @@ -0,0 +1,93 @@ +package terraform + +import ( + "sort" + "strings" +) + +// Semaphore is a wrapper around a channel to provide +// utility methods to clarify that we are treating the +// channel as a semaphore +type Semaphore chan struct{} + +// NewSemaphore creates a semaphore that allows up +// to a given limit of simultaneous acquisitions +func NewSemaphore(n int) Semaphore { + if n == 0 { + panic("semaphore with limit 0") + } + ch := make(chan struct{}, n) + return Semaphore(ch) +} + +// Acquire is used to acquire an available slot. +// Blocks until available. +func (s Semaphore) Acquire() { + s <- struct{}{} +} + +// TryAcquire is used to do a non-blocking acquire. 
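The Semaphore type above treats a buffered channel as a counting semaphore: each held slot is one element sitting in the channel. A minimal usage sketch bounding concurrency to two goroutines (the type and methods are re-declared so the sketch stands alone):

package main

import (
	"fmt"
	"sync"
)

type Semaphore chan struct{}

func (s Semaphore) Acquire() { s <- struct{}{} }
func (s Semaphore) Release() { <-s }

func main() {
	sem := make(Semaphore, 2) // at most 2 workers run concurrently
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			sem.Acquire()
			defer sem.Release()
			fmt.Println("worker", n)
		}(i)
	}
	wg.Wait()
}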
+// Returns a bool indicating success +func (s Semaphore) TryAcquire() bool { + select { + case s <- struct{}{}: + return true + default: + return false + } +} + +// Release is used to return a slot. Acquire must +// be called as a pre-condition. +func (s Semaphore) Release() { + select { + case <-s: + default: + panic("release without an acquire") + } +} + +// resourceProvider returns the provider name for the given type. +func resourceProvider(t, alias string) string { + if alias != "" { + return alias + } + + idx := strings.IndexRune(t, '_') + if idx == -1 { + // If no underscores, the resource name is assumed to be + // also the provider name, e.g. if the provider exposes + // only a single resource of each type. + return t + } + + return t[:idx] +} + +// strSliceContains checks if a given string is contained in a slice +// When anybody asks why Go needs generics, here you go. +func strSliceContains(haystack []string, needle string) bool { + for _, s := range haystack { + if s == needle { + return true + } + } + return false +} + +// deduplicate a slice of strings +func uniqueStrings(s []string) []string { + if len(s) < 2 { + return s + } + + sort.Strings(s) + result := make([]string, 1, len(s)) + result[0] = s[0] + for i := 1; i < len(s); i++ { + if s[i] != result[len(result)-1] { + result = append(result, s[i]) + } + } + return result +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go new file mode 100644 index 00000000..300f2adb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/variables.go @@ -0,0 +1,166 @@ +package terraform + +import ( + "fmt" + "os" + "strings" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/helper/hilmapstructure" +) + +// Variables returns the fully loaded set of variables to use with +// ContextOpts and NewContext, loading any additional variables from +// the environment or any other sources. +// +// The given module tree doesn't need to be loaded. +func Variables( + m *module.Tree, + override map[string]interface{}) (map[string]interface{}, error) { + result := make(map[string]interface{}) + + // Variables are loaded in the following sequence. Each additional step + // will override conflicting variable keys from prior steps: + // + // * Take default values from config + // * Take values from TF_VAR_x env vars + // * Take values specified in the "override" param which is usually + // from -var, -var-file, etc. + // + + // First load from the config + for _, v := range m.Config().Variables { + // If the var has no default, ignore + if v.Default == nil { + continue + } + + // If the type isn't a string, we use it as-is since it is a rich type + if v.Type() != config.VariableTypeString { + result[v.Name] = v.Default + continue + } + + // v.Default has already been parsed as HCL but it may be an int type + switch typedDefault := v.Default.(type) { + case string: + if typedDefault == "" { + continue + } + result[v.Name] = typedDefault + case int, int64: + result[v.Name] = fmt.Sprintf("%d", typedDefault) + case float32, float64: + result[v.Name] = fmt.Sprintf("%f", typedDefault) + case bool: + result[v.Name] = fmt.Sprintf("%t", typedDefault) + default: + panic(fmt.Sprintf( + "Unknown default var type: %T\n\n"+ + "THIS IS A BUG. 
Please report it.", + v.Default)) + } + } + + // Load from env vars + for _, v := range os.Environ() { + if !strings.HasPrefix(v, VarEnvPrefix) { + continue + } + + // Strip off the prefix and get the value after the first "=" + idx := strings.Index(v, "=") + k := v[len(VarEnvPrefix):idx] + v = v[idx+1:] + + // Override the configuration-default values. Note that *not* finding the variable + // in configuration is OK, as we don't want to preclude people from having multiple + // sets of TF_VAR_whatever in their environment even if it is a little weird. + for _, schema := range m.Config().Variables { + if schema.Name != k { + continue + } + + varType := schema.Type() + varVal, err := parseVariableAsHCL(k, v, varType) + if err != nil { + return nil, err + } + + switch varType { + case config.VariableTypeMap: + if err := varSetMap(result, k, varVal); err != nil { + return nil, err + } + default: + result[k] = varVal + } + } + } + + // Load from overrides + for k, v := range override { + for _, schema := range m.Config().Variables { + if schema.Name != k { + continue + } + + switch schema.Type() { + case config.VariableTypeList: + result[k] = v + case config.VariableTypeMap: + if err := varSetMap(result, k, v); err != nil { + return nil, err + } + case config.VariableTypeString: + // Convert to a string and set. We don't catch any errors + // here because the validation step later should catch + // any type errors. + var strVal string + if err := hilmapstructure.WeakDecode(v, &strVal); err == nil { + result[k] = strVal + } else { + result[k] = v + } + default: + panic(fmt.Sprintf( + "Unhandled var type: %T\n\n"+ + "THIS IS A BUG. Please report it.", + schema.Type())) + } + } + } + + return result, nil +} + +// varSetMap sets or merges the map in "v" with the key "k" in the +// "current" set of variables. This is just a private function to remove +// duplicate logic in Variables +func varSetMap(current map[string]interface{}, k string, v interface{}) error { + existing, ok := current[k] + if !ok { + current[k] = v + return nil + } + + existingMap, ok := existing.(map[string]interface{}) + if !ok { + panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k)) + } + + switch typedV := v.(type) { + case []map[string]interface{}: + for newKey, newVal := range typedV[0] { + existingMap[newKey] = newVal + } + case map[string]interface{}: + for newKey, newVal := range typedV { + existingMap[newKey] = newVal + } + default: + return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v)) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go new file mode 100644 index 00000000..577dc85a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/version.go @@ -0,0 +1,31 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/go-version" +) + +// The main version number that is being run at the moment. +const Version = "0.9.6" + +// A pre-release marker for the version. If this is "" (empty string) +// then it means that it is a final release. Otherwise, this is a pre-release +// such as "dev" (in development), "beta", "rc1", etc. +const VersionPrerelease = "dev" + +// SemVersion is an instance of version.Version. This has the secondary +// benefit of verifying during tests and init time that our version is a +// proper semantic version, which should always be the case. 
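The env-var step above strips the TF_VAR_ prefix and splits on the first "=", letting later sources override earlier ones (config defaults, then environment, then -var overrides). A condensed sketch of that precedence with a fake environment slice:

package main

import (
	"fmt"
	"strings"
)

func main() {
	const prefix = "TF_VAR_"
	environ := []string{"PATH=/usr/bin", "TF_VAR_region=us-east-1"}

	vars := map[string]string{"region": "us-west-2"} // config default
	for _, kv := range environ {
		if !strings.HasPrefix(kv, prefix) {
			continue
		}
		idx := strings.Index(kv, "=")
		vars[kv[len(prefix):idx]] = kv[idx+1:] // env overrides the default
	}
	fmt.Println(vars["region"]) // us-east-1
}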
+var SemVersion = version.Must(version.NewVersion(Version))
+
+// VersionHeader is the header name used to send the current terraform version
+// in http requests.
+const VersionHeader = "Terraform-Version"
+
+func VersionString() string {
+	if VersionPrerelease != "" {
+		return fmt.Sprintf("%s-%s", Version, VersionPrerelease)
+	}
+	return Version
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
new file mode 100644
index 00000000..cbd78dd9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT.
+
+package terraform
+
+import "fmt"
+
+const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport"
+
+var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96}
+
+func (i walkOperation) String() string {
+	if i >= walkOperation(len(_walkOperation_index)-1) {
+		return fmt.Sprintf("walkOperation(%d)", i)
+	}
+	return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 00000000..b03310a9
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
new file mode 100644
index 00000000..ad17bf00
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/Makefile
@@ -0,0 +1,44 @@
+
+CMD = jpgo
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  test        to run all the tests"
+	@echo "  build       to build the library and jp executable"
+	@echo "  generate    to run codegen"
+
+
+generate:
+	go generate ./...
+
+build:
+	rm -f $(CMD)
+	go build ./...
+	rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
+	mv cmd/$(CMD)/$(CMD) .
+
+test:
+	go test -v ./...
+
+check:
+	go vet ./...
+	@echo "golint ./..."
+	@lint=`golint ./...`; \
+	lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
+	echo "$$lint"; \
+	if [ "$$lint" != "" ]; then exit 1; fi
+
+htmlc:
+	go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
+
+buildfuzz:
+	go-fuzz-build github.com/jmespath/go-jmespath/fuzz
+
+fuzz: buildfuzz
+	go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/corpus
+
+bench:
+	go test -bench . 
-cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md new file mode 100644 index 00000000..187ef676 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/README.md @@ -0,0 +1,7 @@ +# go-jmespath - A JMESPath implementation in Go + +[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) + + + +See http://jmespath.org for more info. diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 00000000..67df3fc1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,12 @@ +package jmespath + +// Search evaluates a JMESPath expression against input data and returns the result. +func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 00000000..1cd2d239 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 00000000..8a3f2ef0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,840 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // 
Return a dummy value.
+		return true
+	}
+	ith, ok := first.(string)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	second, err := a.intr.Execute(a.node, a.items[j])
+	if err != nil {
+		a.hasError = true
+		// Return a dummy value.
+		return true
+	}
+	jth, ok := second.(string)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	return ith < jth
+}
+
+type byExprFloat struct {
+	intr     *treeInterpreter
+	node     ASTNode
+	items    []interface{}
+	hasError bool
+}
+
+func (a *byExprFloat) Len() int {
+	return len(a.items)
+}
+func (a *byExprFloat) Swap(i, j int) {
+	a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprFloat) Less(i, j int) bool {
+	first, err := a.intr.Execute(a.node, a.items[i])
+	if err != nil {
+		a.hasError = true
+		// Return a dummy value.
+		return true
+	}
+	ith, ok := first.(float64)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	second, err := a.intr.Execute(a.node, a.items[j])
+	if err != nil {
+		a.hasError = true
+		// Return a dummy value.
+		return true
+	}
+	jth, ok := second.(float64)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	return ith < jth
+}
+
+type functionCaller struct {
+	functionTable map[string]functionEntry
+}
+
+func newFunctionCaller() *functionCaller {
+	caller := &functionCaller{}
+	caller.functionTable = map[string]functionEntry{
+		"length": functionEntry{
+			name: "length",
+			arguments: []argSpec{
+				argSpec{types: []jpType{jpString, jpArray, jpObject}},
+			},
+			handler: jpfLength,
+		},
+		"starts_with": functionEntry{
+			name: "starts_with",
+			arguments: []argSpec{
+				argSpec{types: []jpType{jpString}},
+				argSpec{types: []jpType{jpString}},
+			},
+			handler: jpfStartsWith,
+		},
+		"abs": functionEntry{
+			name: "abs",
+			arguments: []argSpec{
+				argSpec{types: []jpType{jpNumber}},
+			},
+			handler: jpfAbs,
+		},
+		"avg": functionEntry{
+			name: "avg",
+			arguments: []argSpec{
+				argSpec{types: []jpType{jpArrayNumber}},
+			},
+			handler: jpfAvg,
+		},
+		"ceil": functionEntry{
+			name: "ceil",
+			arguments: []argSpec{
+				argSpec{types: []jpType{jpNumber}},
+			},
+			handler: jpfCeil,
+		},
+		"contains": functionEntry{
+			name: "contains",
+			arguments: []argSpec{
+				argSpec{types: []jpType{jpArray, jpString}},
+				argSpec{types: []jpType{jpAny}},
+			},
+			handler: jpfContains,
+		},
+		"ends_with": functionEntry{
+			name: "ends_with",
+			arguments: []argSpec{
+				argSpec{types: []jpType{jpString}},
+				argSpec{types: []jpType{jpString}},
+			},
+			handler: jpfEndsWith,
+		},
+		"floor": functionEntry{
+			name: "floor",
+			arguments: []argSpec{
+				argSpec{types: []jpType{jpNumber}},
+			},
+			handler: jpfFloor,
+		},
+		"map": functionEntry{
+			name: "map",
+			arguments: []argSpec{
+				argSpec{types: []jpType{jpExpref}},
+				argSpec{types: []jpType{jpArray}},
+			},
+			handler:   jpfMap,
+			hasExpRef: true,
+		},
+		"max": functionEntry{
+			name: "max",
+			arguments: []argSpec{
+				argSpec{types: []jpType{jpArrayNumber, jpArrayString}},
+			},
+			handler: jpfMax,
+		},
+		"merge": functionEntry{
+			name: "merge",
+			arguments: []argSpec{
+				argSpec{types: []jpType{jpObject}, variadic: true},
+			},
+			handler: jpfMerge,
+		},
+		"max_by": functionEntry{
+			name: "max_by",
+			arguments: []argSpec{
+				argSpec{types: []jpType{jpArray}},
+				argSpec{types: []jpType{jpExpref}},
+			},
+			handler:   jpfMaxBy,
+			hasExpRef: true,
+		},
+		"sum": functionEntry{
+			name: "sum",
+			arguments: []argSpec{
+				argSpec{types: []jpType{jpArrayNumber}},
+			},
+			handler: jpfSum,
+		},
+		"min": functionEntry{
+			name: "min",
+			arguments: []argSpec{
+				argSpec{types: []jpType{jpArrayNumber, jpArrayString}},
+			},
+			handler: jpfMin,
+		},
+		"min_by": 
functionEntry{ + name: "min_by", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": functionEntry{ + name: "type", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": functionEntry{ + name: "keys", + arguments: []argSpec{ + argSpec{types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": functionEntry{ + name: "values", + arguments: []argSpec{ + argSpec{types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": functionEntry{ + name: "sort", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": functionEntry{ + name: "sort_by", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": functionEntry{ + name: "join", + arguments: []argSpec{ + argSpec{types: []jpType{jpString}}, + argSpec{types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": functionEntry{ + name: "reverse", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": functionEntry{ + name: "to_array", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": functionEntry{ + name: "to_string", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": functionEntry{ + name: "to_number", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": functionEntry{ + name: "not_null", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if _, ok := arg.([]interface{}); ok { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = 
append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if c, ok := arg.([]interface{}); ok { + return float64(len(c)), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. + args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
+	items, _ := toArrayStr(arguments[0])
+	if len(items) == 0 {
+		return nil, nil
+	}
+	if len(items) == 1 {
+		return items[0], nil
+	}
+	best := items[0]
+	for _, item := range items[1:] {
+		if item > best {
+			best = item
+		}
+	}
+	return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+	final := make(map[string]interface{})
+	for _, m := range arguments {
+		mapped := m.(map[string]interface{})
+		for key, value := range mapped {
+			final[key] = value
+		}
+	}
+	return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return nil, nil
+	} else if len(arr) == 1 {
+		return arr[0], nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	switch t := start.(type) {
+	case float64:
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(float64)
+			if !ok {
+				return nil, errors.New("invalid type, must be number")
+			}
+			if current > bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	case string:
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(string)
+			if !ok {
+				return nil, errors.New("invalid type, must be string")
+			}
+			if current > bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	default:
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+	items, _ := toArrayNum(arguments[0])
+	sum := 0.0
+	for _, item := range items {
+		sum += item
+	}
+	return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		if len(items) == 0 {
+			return nil, nil
+		}
+		if len(items) == 1 {
+			return items[0], nil
+		}
+		best := items[0]
+		for _, item := range items[1:] {
+			if item < best {
+				best = item
+			}
+		}
+		return best, nil
+	}
+	items, _ := toArrayStr(arguments[0])
+	if len(items) == 0 {
+		return nil, nil
+	}
+	if len(items) == 1 {
+		return items[0], nil
+	}
+	best := items[0]
+	for _, item := range items[1:] {
+		if item < best {
+			best = item
+		}
+	}
+	return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return nil, nil
+	} else if len(arr) == 1 {
+		return arr[0], nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	if t, ok := start.(float64); ok {
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(float64)
+			if !ok {
+				return nil, errors.New("invalid type, must be number")
+			}
+			if current < bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	} else if t, ok := start.(string); ok {
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(string)
+			if !ok {
+				return nil, errors.New("invalid type, must be string")
+			}
+			if current < bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	} else {
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0]
+	if _, ok := arg.(float64); ok {
+		return "number", nil
+	}
+	if _, ok := arg.(string); ok {
+		return "string", nil
+	}
+	if _, ok := arg.([]interface{}); ok {
+		return "array", nil
+	}
+	if _, ok := arg.(map[string]interface{}); ok {
+		return "object", nil
+	}
+	if arg == nil {
+		return "null", nil
+	}
+	if arg == true || arg == false {
+		return "boolean", nil
+	}
+	return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0].(map[string]interface{})
+	collected := make([]interface{}, 0, len(arg))
+	for key := range arg {
+		collected = append(collected, key)
+	}
+	return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0].(map[string]interface{})
+	collected := make([]interface{}, 0, len(arg))
+	for _, value := range arg {
+		collected = append(collected, value)
+	}
+	return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		d := sort.Float64Slice(items)
+		sort.Stable(d)
+		final := make([]interface{}, len(d))
+		for i, val := range d {
+			final[i] = val
+		}
+		return final, nil
+	}
+	// Otherwise we're dealing with sort()'ing strings.
+	items, _ := toArrayStr(arguments[0])
+	d := sort.StringSlice(items)
+	sort.Stable(d)
+	final := make([]interface{}, len(d))
+	for i, val := range d {
+		final[i] = val
+	}
+	return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return arr, nil
+	} else if len(arr) == 1 {
+		return arr, nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := start.(float64); ok {
+		sortable := &byExprFloat{intr, node, arr, false}
+		sort.Stable(sortable)
+		if sortable.hasError {
+			return nil, errors.New("error in sort_by comparison")
+		}
+		return arr, nil
+	} else if _, ok := start.(string); ok {
+		sortable := &byExprString{intr, node, arr, false}
+		sort.Stable(sortable)
+		if sortable.hasError {
+			return nil, errors.New("error in sort_by comparison")
+		}
+		return arr, nil
+	} else {
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+	sep := arguments[0].(string)
+	// We can't just do arguments[1].([]string), we have to
+	// manually convert each item to a string.
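sort_by above works by wrapping the items in a sort.Interface whose Less evaluates a JMESPath expression per element, as byExprString and byExprFloat do. The same shape with a plain Go closure standing in for the expression (byKey is a hypothetical type, not part of the library):

package main

import (
	"fmt"
	"sort"
)

type byKey struct {
	items []map[string]interface{}
	key   func(map[string]interface{}) float64
}

func (b byKey) Len() int           { return len(b.items) }
func (b byKey) Swap(i, j int)      { b.items[i], b.items[j] = b.items[j], b.items[i] }
func (b byKey) Less(i, j int) bool { return b.key(b.items[i]) < b.key(b.items[j]) }

func main() {
	people := []map[string]interface{}{
		{"name": "b", "age": 40.0},
		{"name": "a", "age": 30.0},
	}
	// Stable sort, like sort.Stable in jpfSortBy above.
	sort.Stable(byKey{people, func(m map[string]interface{}) float64 {
		return m["age"].(float64)
	}})
	fmt.Println(people[0]["name"]) // a
}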
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 00000000..13c74604 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
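The interpreter's Execute below is normally reached through the public Search helper shown in api.go earlier in this patch. A short end-to-end usage sketch against decoded JSON (JSON numbers decode to float64, which is why the comparators above work on that type):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	var data interface{}
	json.Unmarshal([]byte(`{"foo": {"bar": [1, 2, 3]}}`), &data)

	// Parse the expression, then walk the AST against the document.
	result, err := jmespath.Search("foo.bar[1]", data)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // 2
}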
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
+		rv := reflect.ValueOf(value)
+		if rv.Kind() == reflect.Slice {
+			index := node.value.(int)
+			if index < 0 {
+				index += rv.Len()
+			}
+			if index < rv.Len() && index >= 0 {
+				v := rv.Index(index)
+				return v.Interface(), nil
+			}
+		}
+		return nil, nil
+	case ASTKeyValPair:
+		return intr.Execute(node.children[0], value)
+	case ASTLiteral:
+		return node.value, nil
+	case ASTMultiSelectHash:
+		if value == nil {
+			return nil, nil
+		}
+		collected := make(map[string]interface{})
+		for _, child := range node.children {
+			current, err := intr.Execute(child, value)
+			if err != nil {
+				return nil, err
+			}
+			key := child.value.(string)
+			collected[key] = current
+		}
+		return collected, nil
+	case ASTMultiSelectList:
+		if value == nil {
+			return nil, nil
+		}
+		collected := []interface{}{}
+		for _, child := range node.children {
+			current, err := intr.Execute(child, value)
+			if err != nil {
+				return nil, err
+			}
+			collected = append(collected, current)
+		}
+		return collected, nil
+	case ASTOrExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			matched, err = intr.Execute(node.children[1], value)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return matched, nil
+	case ASTAndExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			return matched, nil
+		}
+		return intr.Execute(node.children[1], value)
+	case ASTNotExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			return true, nil
+		}
+		return false, nil
+	case ASTPipe:
+		result := value
+		var err error
+		for _, child := range node.children {
+			result, err = intr.Execute(child, result)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return result, nil
+	case ASTProjection:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		sliceType, ok := left.([]interface{})
+		if !ok {
+			if isSliceType(left) {
+				return intr.projectWithReflection(node, left)
+			}
+			return nil, nil
+		}
+		collected := []interface{}{}
+		var current interface{}
+		for _, element := range sliceType {
+			current, err = intr.Execute(node.children[1], element)
+			if err != nil {
+				return nil, err
+			}
+			if current != nil {
+				collected = append(collected, current)
+			}
+		}
+		return collected, nil
+	case ASTSubexpression, ASTIndexExpression:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		return intr.Execute(node.children[1], left)
+	case ASTSlice:
+		sliceType, ok := value.([]interface{})
+		if !ok {
+			if isSliceType(value) {
+				return intr.sliceWithReflection(node, value)
+			}
+			return nil, nil
+		}
+		parts := node.value.([]*int)
+		sliceParams := make([]sliceParam, 3)
+		for i, part := range parts {
+			if part != nil {
+				sliceParams[i].Specified = true
+				sliceParams[i].N = *part
+			}
+		}
+		return slice(sliceType, sliceParams)
+	case ASTValueProjection:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, nil
+		}
+		mapType, ok := left.(map[string]interface{})
+		if !ok {
+			return nil, nil
+		}
+		values := make([]interface{}, 0, len(mapType))
+		for _, value := range mapType {
+			values = append(values, value)
+		}
+		collected := []interface{}{}
+		for _, element := range values {
+			current, err := intr.Execute(node.children[1], element)
+			if err != nil {
+				return nil, err
+			}
+			if current != nil {
+				collected = append(collected, current)
+			}
+		}
+		return collected,
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 00000000..817900c8 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. 
+	currentPos int          // The current position in the string.
+	lastWidth  int          // The width of the current rune.
+	buf        bytes.Buffer // Internal buffer used for building up values.
+}
+
+// SyntaxError is the main error used whenever a lexing or parsing error occurs.
+type SyntaxError struct {
+	msg        string // Error message displayed to user
+	Expression string // Expression that generated a SyntaxError
+	Offset     int    // The location in the string where the error occurred
+}
+
+func (e SyntaxError) Error() string {
+	// In the future, it would be good to underline the specific
+	// location where the error occurred.
+	return "SyntaxError: " + e.msg
+}
+
+// HighlightLocation will show where the syntax error occurred.
+// It will place a "^" character on a line below the expression
+// at the point where the syntax error occurred.
+func (e SyntaxError) HighlightLocation() string {
+	return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
+}
+
+//go:generate stringer -type=tokType
+const (
+	tUnknown tokType = iota
+	tStar
+	tDot
+	tFilter
+	tFlatten
+	tLparen
+	tRparen
+	tLbracket
+	tRbracket
+	tLbrace
+	tRbrace
+	tOr
+	tPipe
+	tNumber
+	tUnquotedIdentifier
+	tQuotedIdentifier
+	tComma
+	tColon
+	tLT
+	tLTE
+	tGT
+	tGTE
+	tEQ
+	tNE
+	tJSONLiteral
+	tStringLiteral
+	tCurrent
+	tExpref
+	tAnd
+	tNot
+	tEOF
+)
+
+var basicTokens = map[rune]tokType{
+	'.': tDot,
+	'*': tStar,
+	',': tComma,
+	':': tColon,
+	'{': tLbrace,
+	'}': tRbrace,
+	']': tRbracket, // tLbracket not included because it could be "[]"
+	'(': tLparen,
+	')': tRparen,
+	'@': tCurrent,
+}
+
+// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
+// When using this bitmask just be sure to shift the rune down 64 bits
+// before checking against identifierStartBits.
+const identifierStartBits uint64 = 576460745995190270
+
+// Bit mask for [a-zA-Z0-9_], 128 bits -> 2 uint64s.
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
+
+var whiteSpace = map[rune]bool{
+	' ': true, '\t': true, '\n': true, '\r': true,
+}
+
+func (t token) String() string {
+	return fmt.Sprintf("Token{%+v, %s, %d, %d}",
+		t.tokenType, t.value, t.position, t.length)
+}
+
+// NewLexer creates a new JMESPath lexer.
+func NewLexer() *Lexer {
+	lexer := Lexer{}
+	return &lexer
+}
+
+func (lexer *Lexer) next() rune {
+	if lexer.currentPos >= len(lexer.expression) {
+		lexer.lastWidth = 0
+		return eof
+	}
+	r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
+	lexer.lastWidth = w
+	lexer.currentPos += w
+	return r
+}
+
+func (lexer *Lexer) back() {
+	lexer.currentPos -= lexer.lastWidth
+}
+
+func (lexer *Lexer) peek() rune {
+	t := lexer.next()
+	lexer.back()
+	return t
+}
+
+// tokenize takes an expression and returns corresponding tokens.
+func (lexer *Lexer) tokenize(expression string) ([]token, error) {
+	var tokens []token
+	lexer.expression = expression
+	lexer.currentPos = 0
+	lexer.lastWidth = 0
+loop:
+	for {
+		r := lexer.next()
+		if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
+			t := lexer.consumeUnquotedIdentifier()
+			tokens = append(tokens, t)
+		} else if val, ok := basicTokens[r]; ok {
+			// Basic single char token.
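+			// Note that '[' is not in basicTokens: it is handled
+			// separately by consumeLBracket because of the two-char
+			// "[?" and "[]" forms.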
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+		return token{}, SyntaxError{
+			msg:        "Unclosed delimiter: '",
+			Expression: lexer.expression,
+			Offset:     len(lexer.expression),
+		}
+	}
+	if currentIndex < lexer.currentPos {
+		lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
+	}
+	value := lexer.buf.String()
+	// Reset the buffer so it can be reused again.
+	lexer.buf.Reset()
+	return token{
+		tokenType: tStringLiteral,
+		value:     value,
+		position:  start,
+		length:    len(value),
+	}, nil
+}
+
+func (lexer *Lexer) syntaxError(msg string) SyntaxError {
+	return SyntaxError{
+		msg:        msg,
+		Expression: lexer.expression,
+		Offset:     lexer.currentPos - 1,
+	}
+}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tOr, "|" -> tPipe.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+	start := lexer.currentPos - lexer.lastWidth
+	nextRune := lexer.next()
+	var t token
+	if nextRune == second {
+		t = token{
+			tokenType: matchedType,
+			value:     string(first) + string(second),
+			position:  start,
+			length:    2,
+		}
+	} else {
+		lexer.back()
+		t = token{
+			tokenType: singleCharType,
+			value:     string(first),
+			position:  start,
+			length:    1,
+		}
+	}
+	return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+	// There are three options here:
+	// 1. A filter expression "[?"
+	// 2. A flatten operator "[]"
+	// 3. A bare lbracket "["
+	start := lexer.currentPos - lexer.lastWidth
+	nextRune := lexer.next()
+	var t token
+	if nextRune == '?' {
+		t = token{
+			tokenType: tFilter,
+			value:     "[?",
+			position:  start,
+			length:    2,
+		}
+	} else if nextRune == ']' {
+		t = token{
+			tokenType: tFlatten,
+			value:     "[]",
+			position:  start,
+			length:    2,
+		}
+	} else {
+		t = token{
+			tokenType: tLbracket,
+			value:     "[",
+			position:  start,
+			length:    1,
+		}
+		lexer.back()
+	}
+	return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+	start := lexer.currentPos
+	value, err := lexer.consumeUntil('"')
+	if err != nil {
+		return token{}, err
+	}
+	var decoded string
+	asJSON := []byte("\"" + value + "\"")
+	if err := json.Unmarshal(asJSON, &decoded); err != nil {
+		return token{}, err
+	}
+	return token{
+		tokenType: tQuotedIdentifier,
+		value:     decoded,
+		position:  start - 1,
+		length:    len(decoded),
+	}, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+	// Consume runes until we reach the end of an unquoted
+	// identifier.
+	start := lexer.currentPos - lexer.lastWidth
+	for {
+		r := lexer.next()
+		if r < 0 || r >= 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+			lexer.back()
+			break
+		}
+	}
+	value := lexer.expression[start:lexer.currentPos]
+	return token{
+		tokenType: tUnquotedIdentifier,
+		value:     value,
+		position:  start,
+		length:    lexer.currentPos - start,
+	}
+}
+
+func (lexer *Lexer) consumeNumber() token {
+	// Consume runes until we reach something that's not a number.
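+	// A leading '-' (if any) was already consumed by tokenize and is
+	// included here via start.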
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 00000000..c8f4bceb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
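+// For example, Parse("foo[0].bar") returns the AST for that expression,
+// while Parse("foo[") returns a SyntaxError whose Offset points at the
+// position where parsing failed.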
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+	lexer := NewLexer()
+	p.expression = expression
+	p.index = 0
+	tokens, err := lexer.tokenize(expression)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	p.tokens = tokens
+	parsed, err := p.parseExpression(0)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	if p.current() != tEOF {
+		return ASTNode{}, p.syntaxError(fmt.Sprintf(
+			"Unexpected token at the end of the expression: %s", p.current()))
+	}
+	return parsed, nil
+}
+
+func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
+	var err error
+	leftToken := p.lookaheadToken(0)
+	p.advance()
+	leftNode, err := p.nud(leftToken)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	currentToken := p.current()
+	for bindingPower < bindingPowers[currentToken] {
+		p.advance()
+		leftNode, err = p.led(currentToken, leftNode)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		currentToken = p.current()
+	}
+	return leftNode, nil
+}
+
+func (p *Parser) parseIndexExpression() (ASTNode, error) {
+	if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
+		return p.parseSliceExpression()
+	}
+	indexStr := p.lookaheadToken(0).value
+	parsedInt, err := strconv.Atoi(indexStr)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
+	p.advance()
+	if err := p.match(tRbracket); err != nil {
+		return ASTNode{}, err
+	}
+	return indexNode, nil
+}
+
+func (p *Parser) parseSliceExpression() (ASTNode, error) {
+	parts := []*int{nil, nil, nil}
+	index := 0
+	current := p.current()
+	for current != tRbracket && index < 3 {
+		if current == tColon {
+			index++
+			p.advance()
+		} else if current == tNumber {
+			parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
+			if err != nil {
+				return ASTNode{}, err
+			}
+			parts[index] = &parsedInt
+			p.advance()
+		} else {
+			return ASTNode{}, p.syntaxError(
+				"Expected tColon or tNumber, received: " + p.current().String())
+		}
+		current = p.current()
+	}
+	if err := p.match(tRbracket); err != nil {
+		return ASTNode{}, err
+	}
+	return ASTNode{
+		nodeType: ASTSlice,
+		value:    parts,
+	}, nil
+}
+
+func (p *Parser) match(tokenType tokType) error {
+	if p.current() == tokenType {
+		p.advance()
+		return nil
+	}
+	return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
+func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
+	switch tokenType {
+	case tDot:
+		if p.current() != tStar {
+			right, err := p.parseDotRHS(bindingPowers[tDot])
+			return ASTNode{
+				nodeType: ASTSubexpression,
+				children: []ASTNode{node, right},
+			}, err
+		}
+		p.advance()
+		right, err := p.parseProjectionRHS(bindingPowers[tDot])
+		return ASTNode{
+			nodeType: ASTValueProjection,
+			children: []ASTNode{node, right},
+		}, err
+	case tPipe:
+		right, err := p.parseExpression(bindingPowers[tPipe])
+		return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
+	case tOr:
+		right, err := p.parseExpression(bindingPowers[tOr])
+		return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
+	case tAnd:
+		right, err := p.parseExpression(bindingPowers[tAnd])
+		return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
+	case tLparen:
+		name := node.value
+		var args []ASTNode
+		for p.current() != tRparen {
+			expression, err := p.parseExpression(0)
+			if err != nil {
+				return ASTNode{}, err
+			}
+			if p.current() == tComma {
+				if err := p.match(tComma); err != nil {
+					return ASTNode{}, err
+				}
+			}
+			args = append(args, expression)
+		}
+		if err := p.match(tRparen); err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{
+			nodeType: ASTFunctionExpression,
+			value:    name,
+			children: args,
+		}, nil
+	case tFilter:
+		return p.parseFilter(node)
+	case tFlatten:
+		left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
+		right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+		return ASTNode{
+			nodeType: ASTProjection,
+			children: []ASTNode{left, right},
+		}, err
+	case tEQ, tNE, tGT, tGTE, tLT, tLTE:
+		right, err := p.parseExpression(bindingPowers[tokenType])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{
+			nodeType: ASTComparator,
+			value:    tokenType,
+			children: []ASTNode{node, right},
+		}, nil
+	case tLbracket:
+		tokenType := p.current()
+		var right ASTNode
+		var err error
+		if tokenType == tNumber || tokenType == tColon {
+			right, err = p.parseIndexExpression()
+			if err != nil {
+				return ASTNode{}, err
+			}
+			return p.projectIfSlice(node, right)
+		}
+		// Otherwise this is a projection.
+		if err := p.match(tStar); err != nil {
+			return ASTNode{}, err
+		}
+		if err := p.match(tRbracket); err != nil {
+			return ASTNode{}, err
+		}
+		right, err = p.parseProjectionRHS(bindingPowers[tStar])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{
+			nodeType: ASTProjection,
+			children: []ASTNode{node, right},
+		}, nil
+	}
+	return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
+}
+
+func (p *Parser) nud(token token) (ASTNode, error) {
+	switch token.tokenType {
+	case tJSONLiteral:
+		var parsed interface{}
+		err := json.Unmarshal([]byte(token.value), &parsed)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
+	case tStringLiteral:
+		return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
+	case tUnquotedIdentifier:
+		return ASTNode{
+			nodeType: ASTField,
+			value:    token.value,
+		}, nil
+	case tQuotedIdentifier:
+		node := ASTNode{nodeType: ASTField, value: token.value}
+		if p.current() == tLparen {
+			return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
+		}
+		return node, nil
+	case tStar:
+		left := ASTNode{nodeType: ASTIdentity}
+		var right ASTNode
+		var err error
+		if p.current() == tRbracket {
+			right = ASTNode{nodeType: ASTIdentity}
+		} else {
+			right, err = p.parseProjectionRHS(bindingPowers[tStar])
+		}
+		return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
+	case tFilter:
+		return p.parseFilter(ASTNode{nodeType: ASTIdentity})
+	case tLbrace:
+		return p.parseMultiSelectHash()
+	case tFlatten:
+		left := ASTNode{
+			nodeType: ASTFlatten,
+			children: []ASTNode{ASTNode{nodeType: ASTIdentity}},
+		}
+		right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
+	case tLbracket:
+		tokenType := p.current()
+		if tokenType == tNumber || tokenType == tColon {
+			right, err := p.parseIndexExpression()
+			if err != nil {
+				return ASTNode{}, err
+			}
+			return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
+		} else if tokenType == tStar && p.lookahead(1) == tRbracket {
+			p.advance()
+			p.advance()
+			right, err := p.parseProjectionRHS(bindingPowers[tStar])
+			if err != nil {
+				return ASTNode{}, err
+			}
+			return ASTNode{
+				nodeType: ASTProjection,
+				children: []ASTNode{ASTNode{nodeType: ASTIdentity}, right},
+			}, nil
+		} else {
+			return p.parseMultiSelectList()
+		}
+	case tCurrent:
+		return ASTNode{nodeType: ASTCurrentNode}, nil
+	case tExpref:
+		expression, err := p.parseExpression(bindingPowers[tExpref])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
+	case tNot:
+		expression, err := p.parseExpression(bindingPowers[tNot])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
+	case tLparen:
+		expression, err := p.parseExpression(0)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		if err := p.match(tRparen); err != nil {
+			return ASTNode{}, err
+		}
+		return expression, nil
+	case tEOF:
+		return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
+	}
+
+	return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
+}
+
+func (p *Parser) parseMultiSelectList() (ASTNode, error) {
+	var expressions []ASTNode
+	for {
+		expression, err := p.parseExpression(0)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		expressions = append(expressions, expression)
+		if p.current() == tRbracket {
+			break
+		}
+		err = p.match(tComma)
+		if err != nil {
+			return ASTNode{}, err
+		}
+	}
+	err := p.match(tRbracket)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	return ASTNode{
+		nodeType: ASTMultiSelectList,
+		children: expressions,
+	}, nil
+}
+
+func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
+	var children []ASTNode
+	for {
+		keyToken := p.lookaheadToken(0)
+		if err := p.match(tUnquotedIdentifier); err != nil {
+			if err := p.match(tQuotedIdentifier); err != nil {
+				return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
+			}
+		}
+		keyName := keyToken.value
+		err := p.match(tColon)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		value, err := p.parseExpression(0)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		node := ASTNode{
+			nodeType: ASTKeyValPair,
+			value:    keyName,
+			children: []ASTNode{value},
+		}
+		children = append(children, node)
+		if p.current() == tComma {
+			err := p.match(tComma)
+			if err != nil {
+				return ASTNode{}, err
+			}
+		} else if p.current() == tRbrace {
+			err := p.match(tRbrace)
+			if err != nil {
+				return ASTNode{}, err
+			}
+			break
+		}
+	}
+	return ASTNode{
+		nodeType: ASTMultiSelectHash,
+		children: children,
+	}, nil
+}
+
+func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
+	indexExpr := ASTNode{
+		nodeType: ASTIndexExpression,
+		children: []ASTNode{left, right},
+	}
+	if right.nodeType == ASTSlice {
+		right, err := p.parseProjectionRHS(bindingPowers[tStar])
+		return ASTNode{
+			nodeType: ASTProjection,
+			children: []ASTNode{indexExpr, right},
+		}, err
+	}
+	return indexExpr, nil
+}
+
+func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
+	var right, condition ASTNode
+	var err error
+	condition, err = p.parseExpression(0)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	if err := p.match(tRbracket); err != nil {
+		return ASTNode{}, err
+	}
+	if p.current() == tFlatten {
+		right = ASTNode{nodeType: ASTIdentity}
+	} else {
+		right, err = p.parseProjectionRHS(bindingPowers[tFilter])
+		if err != nil {
+			return ASTNode{}, err
+		}
+	}
+
+	return ASTNode{
+		nodeType: ASTFilterProjection,
+		children: []ASTNode{node, right, condition},
+	}, nil
+}
+
+func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
+	lookahead := p.current()
+	if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
+		return p.parseExpression(bindingPower)
+	} else if lookahead == tLbracket {
+		if err := p.match(tLbracket); err != nil {
+			return ASTNode{}, err
+		}
+		return p.parseMultiSelectList()
+	} else if lookahead == tLbrace {
+		if err := p.match(tLbrace); err != nil {
+			return ASTNode{}, err
+		}
+		return p.parseMultiSelectHash()
+	}
+	return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
+}
+
+func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
+	current := p.current()
+	if bindingPowers[current] < 10 {
+		return ASTNode{nodeType: ASTIdentity}, nil
+	} else if current == tLbracket {
+		return p.parseExpression(bindingPower)
+	} else if current == tFilter {
+		return p.parseExpression(bindingPower)
+	} else if current == tDot {
+		err := p.match(tDot)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return p.parseDotRHS(bindingPower)
+	} else {
+		return ASTNode{}, p.syntaxError("Unexpected token: " + current.String())
+	}
+}
+
+func (p *Parser) lookahead(number int) tokType {
+	return p.lookaheadToken(number).tokenType
+}
+
+func (p *Parser) current() tokType {
+	return p.lookahead(0)
+}
+
+func (p *Parser) lookaheadToken(number int) token {
+	return p.tokens[p.index+number]
+}
+
+func (p *Parser) advance() {
+	p.index++
+}
+
+func tokensOneOf(elements []tokType, token tokType) bool {
+	for _, elem := range elements {
+		if elem == token {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *Parser) syntaxError(msg string) SyntaxError {
+	return SyntaxError{
+		msg:        msg,
+		Expression: p.expression,
+		Offset:     p.lookaheadToken(0).position,
+	}
+}
+
+// Create a SyntaxError based on the provided token.
+// This differs from syntaxError() which creates a SyntaxError
+// based on the current lookahead token.
+func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
+	return SyntaxError{
+		msg:        msg,
+		Expression: p.expression,
+		Offset:     t.position,
+	}
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100644
index 00000000..dae79cbd
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
+
+var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
+
+func (i tokType) String() string {
+	if i < 0 || i >= tokType(len(_tokType_index)-1) {
+		return fmt.Sprintf("tokType(%d)", i)
+	}
+	return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go
new file mode 100644
index 00000000..ddc1b7d7
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/util.go
@@ -0,0 +1,185 @@
+package jmespath
+
+import (
+	"errors"
+	"reflect"
+)
+
+// IsFalse determines if an object is false based on the JMESPath spec.
+// JMESPath defines false values to be any of:
+// - An empty string, array, or hash.
+// - The boolean value false.
+// - nil
+func isFalse(value interface{}) bool {
+	switch v := value.(type) {
+	case bool:
+		return !v
+	case []interface{}:
+		return len(v) == 0
+	case map[string]interface{}:
+		return len(v) == 0
+	case string:
+		return len(v) == 0
+	case nil:
+		return true
+	}
+	// Try the reflection cases before returning false.
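+	// These handle caller-provided Go values (typed slices, maps, structs,
+	// and pointers) that did not come from encoding/json.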
+ rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? 
+ if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 00000000..22985159 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md new file mode 100644 index 00000000..bcb8c8d2 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -0,0 +1,21 @@ +# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go new file mode 100644 index 00000000..db6a6aa1 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copier_time.go @@ -0,0 +1,15 @@ +package copystructure + +import ( + "reflect" + "time" +) + +func init() { + Copiers[reflect.TypeOf(time.Time{})] = timeCopier +} + +func timeCopier(v interface{}) (interface{}, error) { + // Just... copy it. + return v.(time.Time), nil +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go new file mode 100644 index 00000000..ca658132 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -0,0 +1,545 @@ +package copystructure + +import ( + "errors" + "reflect" + "sync" + + "github.com/mitchellh/reflectwalk" +) + +// Copy returns a deep copy of v. 
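+// For example, copying a struct containing maps, slices, or pointers
+// returns a value of the same type whose nested maps, slices, and
+// pointed-to values are themselves fresh copies.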
+func Copy(v interface{}) (interface{}, error) { + return Config{}.Copy(v) +} + +// CopierFunc is a function that knows how to deep copy a specific type. +// Register these globally with the Copiers variable. +type CopierFunc func(interface{}) (interface{}, error) + +// Copiers is a map of types that behave specially when they are copied. +// If a type is found in this map while deep copying, this function +// will be called to copy it instead of attempting to copy all fields. +// +// The key should be the type, obtained using: reflect.TypeOf(value with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) + +// Must is a helper that wraps a call to a function returning +// (interface{}, error) and panics if the error is non-nil. It is intended +// for use in variable initializations and should only be used when a copy +// error should be a crashing case. +func Must(v interface{}, err error) interface{} { + if err != nil { + panic("copy error: " + err.Error()) + } + + return v +} + +var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") + +type Config struct { + // Lock any types that are a sync.Locker and are not a mutex while copying. + // If there is an RLocker method, use that to get the sync.Locker. + Lock bool + + // Copiers is a map of types associated with a CopierFunc. Use the global + // Copiers map if this is nil. + Copiers map[reflect.Type]CopierFunc +} + +func (c Config) Copy(v interface{}) (interface{}, error) { + if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { + return nil, errPointerRequired + } + + w := new(walker) + if c.Lock { + w.useLocks = true + } + + if c.Copiers == nil { + c.Copiers = Copiers + } + + err := reflectwalk.Walk(v, w) + if err != nil { + return nil, err + } + + // Get the result. If the result is nil, then we want to turn it + // into a typed nil if we can. + result := w.Result + if result == nil { + val := reflect.ValueOf(v) + result = reflect.Indirect(reflect.New(val.Type())).Interface() + } + + return result, nil +} + +// Return the key used to index interfaces types we've seen. Store the number +// of pointers in the upper 32bits, and the depth in the lower 32bits. This is +// easy to calculate, easy to match a key with our current depth, and we don't +// need to deal with initializing and cleaning up nested maps or slices. +func ifaceKey(pointers, depth int) uint64 { + return uint64(pointers)<<32 | uint64(depth) +} + +type walker struct { + Result interface{} + + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value + + // This stores the number of pointers we've walked over, indexed by depth. + ps []int + + // If an interface is indirected by a pointer, we need to know the type of + // interface to create when creating the new value. Store the interface + // types here, indexed by both the walk depth and the number of pointers + // already seen at that depth. Use ifaceKey to calculate the proper uint64 + // value. 
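+	// For example, a value behind two pointers at walk depth three is
+	// recorded under ifaceKey(2, 3).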
+ ifaceTypes map[uint64]reflect.Type + + // any locks we've taken, indexed by depth + locks []sync.Locker + // take locks while walking the structure + useLocks bool +} + +func (w *walker) Enter(l reflectwalk.Location) error { + w.depth++ + + // ensure we have enough elements to index via w.depth + for w.depth >= len(w.locks) { + w.locks = append(w.locks, nil) + } + + for len(w.ps) < w.depth+1 { + w.ps = append(w.ps, 0) + } + + return nil +} + +func (w *walker) Exit(l reflectwalk.Location) error { + locker := w.locks[w.depth] + w.locks[w.depth] = nil + if locker != nil { + defer locker.Unlock() + } + + // clear out pointers and interfaces as we exit the stack + w.ps[w.depth] = 0 + + for k := range w.ifaceTypes { + mask := uint64(^uint32(0)) + if k&mask == uint64(w.depth) { + delete(w.ifaceTypes, k) + } + } + + w.depth-- + if w.ignoreDepth > w.depth { + w.ignoreDepth = 0 + } + + if w.ignoring() { + return nil + } + + switch l { + case reflectwalk.Array: + fallthrough + case reflectwalk.Map: + fallthrough + case reflectwalk.Slice: + w.replacePointerMaybe() + + // Pop map off our container + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.MapValue: + // Pop off the key and value + mv := w.valPop() + mk := w.valPop() + m := w.cs[len(w.cs)-1] + + // If mv is the zero value, SetMapIndex deletes the key form the map, + // or in this case never adds it. We need to create a properly typed + // zero value so that this key can be set. + if !mv.IsValid() { + mv = reflect.Zero(m.Elem().Type().Elem()) + } + m.Elem().SetMapIndex(mk, mv) + case reflectwalk.ArrayElem: + // Pop off the value and the index and set it on the array + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + a := w.cs[len(w.cs)-1] + ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call + if ae.CanSet() { + ae.Set(v) + } + } + case reflectwalk.SliceElem: + // Pop off the value and the index and set it on the slice + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + se := s.Elem().Index(i) + if se.CanSet() { + se.Set(v) + } + } + case reflectwalk.Struct: + w.replacePointerMaybe() + + // Remove the struct from the container stack + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.StructField: + // Pop off the value and the field + v := w.valPop() + f := w.valPop().Interface().(reflect.StructField) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + + if sf.CanSet() { + sf.Set(v) + } + } + case reflectwalk.WalkLoc: + // Clear out the slices for GC + w.cs = nil + w.vals = nil + } + + return nil +} + +func (w *walker) Map(m reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(m) + + // Create the map. 
If the map itself is nil, then just make a nil map + var newMap reflect.Value + if m.IsNil() { + newMap = reflect.New(m.Type()) + } else { + newMap = wrapPtr(reflect.MakeMap(m.Type())) + } + + w.cs = append(w.cs, newMap) + w.valPush(newMap) + return nil +} + +func (w *walker) MapElem(m, k, v reflect.Value) error { + return nil +} + +func (w *walker) PointerEnter(v bool) error { + if v { + w.ps[w.depth]++ + } + return nil +} + +func (w *walker) PointerExit(v bool) error { + if v { + w.ps[w.depth]-- + } + return nil +} + +func (w *walker) Interface(v reflect.Value) error { + if !v.IsValid() { + return nil + } + if w.ifaceTypes == nil { + w.ifaceTypes = make(map[uint64]reflect.Type) + } + + w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() + return nil +} + +func (w *walker) Primitive(v reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(v) + + // IsValid verifies the v is non-zero and CanInterface verifies + // that we're allowed to read this value (unexported fields). + var newV reflect.Value + if v.IsValid() && v.CanInterface() { + newV = reflect.New(v.Type()) + newV.Elem().Set(v) + } + + w.valPush(newV) + w.replacePointerMaybe() + return nil +} + +func (w *walker) Slice(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var newS reflect.Value + if s.IsNil() { + newS = reflect.New(s.Type()) + } else { + newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) + } + + w.cs = append(w.cs, newS) + w.valPush(newS) + return nil +} + +func (w *walker) SliceElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the slice here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Array(a reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(a) + + newA := reflect.New(a.Type()) + + w.cs = append(w.cs, newA) + w.valPush(newA) + return nil +} + +func (w *walker) ArrayElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the array here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Struct(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var v reflect.Value + if c, ok := Copiers[s.Type()]; ok { + // We have a Copier for this struct, so we use that copier to + // get the copy, and we ignore anything deeper than this. + w.ignoreDepth = w.depth + + dup, err := c(s.Interface()) + if err != nil { + return err + } + + v = reflect.ValueOf(dup) + } else { + // No copier, we copy ourselves and allow reflectwalk to guide + // us deeper into the structure for copying. + v = reflect.New(s.Type()) + } + + // Push the value onto the value stack for setting the struct field, + // and add the struct itself to the containers stack in case we walk + // deeper so that its own fields can be modified. + w.valPush(v) + w.cs = append(w.cs, v) + + return nil +} + +func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { + if w.ignoring() { + return nil + } + + // If PkgPath is non-empty, this is a private (unexported) field. + // We do not set this unexported since the Go runtime doesn't allow us. + if f.PkgPath != "" { + return reflectwalk.SkipEntry + } + + // Push the field onto the stack, we'll handle it when we exit + // the struct field in Exit... 
+	w.valPush(reflect.ValueOf(f))
+	return nil
+}
+
+// ignore causes the walker to ignore any more values until we exit this one
+func (w *walker) ignore() {
+	w.ignoreDepth = w.depth
+}
+
+func (w *walker) ignoring() bool {
+	return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
+}
+
+func (w *walker) pointerPeek() bool {
+	return w.ps[w.depth] > 0
+}
+
+func (w *walker) valPop() reflect.Value {
+	result := w.vals[len(w.vals)-1]
+	w.vals = w.vals[:len(w.vals)-1]
+
+	// If we're out of values, that means we popped everything off. In
+	// this case, we reset the result so the next pushed value becomes
+	// the result.
+	if len(w.vals) == 0 {
+		w.Result = nil
+	}
+
+	return result
+}
+
+func (w *walker) valPush(v reflect.Value) {
+	w.vals = append(w.vals, v)
+
+	// If we haven't set the result yet, then this is the result since
+	// it is the first (outermost) value we're seeing.
+	if w.Result == nil && v.IsValid() {
+		w.Result = v.Interface()
+	}
+}
+
+func (w *walker) replacePointerMaybe() {
+	// Determine the last pointer value. If it is NOT a pointer, then
+	// we need to push that onto the stack.
+	if !w.pointerPeek() {
+		w.valPush(reflect.Indirect(w.valPop()))
+		return
+	}
+
+	v := w.valPop()
+
+	// If the expected type is a pointer to an interface of any depth,
+	// such as *interface{}, **interface{}, etc., then we need to convert
+	// the value "v" from *CONCRETE to *interface{} so types match for
+	// Set.
+	//
+	// For example, if v is type *Foo where Foo is a struct, v would become
+	// *interface{} instead. This only happens if we have an interface expectation
+	// at this depth.
+	//
+	// For more info, see GH-16
+	if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
+		y := reflect.New(iType)           // Create *interface{}
+		y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
+		v = y                             // v is now typed *interface{} (where *v = Foo)
+	}
+
+	for i := 1; i < w.ps[w.depth]; i++ {
+		if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
+			iface := reflect.New(iType).Elem()
+			iface.Set(v)
+			v = iface
+		}
+
+		p := reflect.New(v.Type())
+		p.Elem().Set(v)
+		v = p
+	}
+
+	w.valPush(v)
+}
+
+// if this value is a Locker, lock it and add it to the locks slice
+func (w *walker) lock(v reflect.Value) {
+	if !w.useLocks {
+		return
+	}
+
+	if !v.IsValid() || !v.CanInterface() {
+		return
+	}
+
+	type rlocker interface {
+		RLocker() sync.Locker
+	}
+
+	var locker sync.Locker
+
+	// We can't call Interface() on a value directly, since that requires
+	// a copy. This is OK, since the pointer to a value which is a sync.Locker
+	// is also a sync.Locker.
+	if v.Kind() == reflect.Ptr {
+		switch l := v.Interface().(type) {
+		case rlocker:
+			// don't lock a mutex directly
+			if _, ok := l.(*sync.RWMutex); !ok {
+				locker = l.RLocker()
+			}
+		case sync.Locker:
+			locker = l
+		}
+	} else if v.CanAddr() {
+		switch l := v.Addr().Interface().(type) {
+		case rlocker:
+			// don't lock a mutex directly
+			if _, ok := l.(*sync.RWMutex); !ok {
+				locker = l.RLocker()
+			}
+		case sync.Locker:
+			locker = l
+		}
+	}
+
+	// still no callable locker
+	if locker == nil {
+		return
+	}
+
+	// don't lock a mutex directly
+	switch locker.(type) {
+	case *sync.Mutex, *sync.RWMutex:
+		return
+	}
+
+	locker.Lock()
+	w.locks[w.depth] = locker
+}
+
+// wrapPtr is a helper that takes v and always makes it *v.
copystructure +// stores things internally as pointers until the last moment before unwrapping +func wrapPtr(v reflect.Value) reflect.Value { + if !v.IsValid() { + return v + } + vPtr := reflect.New(v.Type()) + vPtr.Elem().Set(v) + return vPtr +} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 00000000..f9c841a5 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 00000000..d70706d5 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. + +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. + +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. This means that any Go code that uses that package +cannot cross compile. But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 00000000..47e1f9ef --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,137 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. 
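+// On Unix this checks $HOME, then "getent passwd", then falls back to
+// running the shell; on Windows it checks %HOME%, then
+// %HOMEDRIVE%%HOMEPATH%, then %USERPROFILE%.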
+func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +func dirUnix() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // If that fails, try getent + var stdout bytes.Buffer + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If the error is ErrNotFound, we ignore it. Otherwise, return it. + if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd = exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + home = os.Getenv("USERPROFILE") + } + if home == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 00000000..f9c841a5 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 00000000..659d6885 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 00000000..acdaadef --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,158 @@ +package mapstructure + +import ( + "errors" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. 
This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Type, to reflect.Type, + data interface{}) (interface{}, error) { + // Build our arguments that reflect expects + argVals := make([]reflect.Value, 3) + argVals[0] = reflect.ValueOf(from) + argVals[1] = reflect.ValueOf(to) + argVals[2] = reflect.ValueOf(data) + + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from, to, data) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), data) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + var err error + for _, f1 := range fs { + data, err = DecodeHookExec(f1, f, t, data) + if err != nil { + return nil, err + } + + // Modify the from kind to be correct with the new data + f = nil + if val := reflect.ValueOf(data); val.IsValid() { + f = val.Type() + } + } + + return data, nil + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. 
+func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 00000000..47a99e5a --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. +type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 00000000..6ec5c333 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,828 @@ +// Package mapstructure exposes functionality to convert an arbitrary +// map[string]interface{} into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type should be DecodeHookFuncType or DecodeHookFuncKind. +// Either is accepted. Types are a superset of Kinds (Types can return +// Kinds) and are generally a richer thing to use, but Kinds are simpler +// if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. 
+type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. + // + // If an error is returned, the entire decode will fail with that + // error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes a map and uses reflection to convert it into the +// given Go native structure. val must be a pointer to a struct. 
+func Decode(m interface{}, rawVal interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: rawVal, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(m) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(raw interface{}) error { + return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error { + if data == nil { + // If the data is nil, then we don't set anything. + return nil + } + + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + // If the data value is invalid, then we just set the value + // to be the zero value. + val.Set(reflect.Zero(val.Type())) + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the data. + var err error + data, err = DecodeHookExec( + d.config.DecodeHook, + dataVal.Type(), val.Type(), data) + if err != nil { + return fmt.Errorf("error decoding '%s': %s", name, err) + } + } + + var err error + dataKind := getKind(val) + switch dataKind { + case reflect.Bool: + err = d.decodeBool(name, data, val) + case reflect.Interface: + err = d.decodeBasic(name, data, val) + case reflect.String: + err = d.decodeString(name, data, val) + case reflect.Int: + err = d.decodeInt(name, data, val) + case reflect.Uint: + err = d.decodeUint(name, data, val) + case reflect.Float32: + err = d.decodeFloat(name, data, val) + case reflect.Struct: + err = d.decodeStruct(name, data, val) + case reflect.Map: + err = d.decodeMap(name, data, val) + case reflect.Ptr: + err = d.decodePtr(name, data, val) + case reflect.Slice: + err = d.decodeSlice(name, data, val) + case reflect.Func: + err = d.decodeFunc(name, data, val) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, dataKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metadata. + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) 
and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch { + case elemKind == reflect.Uint8: + val.SetString(string(dataVal.Interface().([]uint8))) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + 
val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if dataVal.Kind() != reflect.Map { + // In weak mode, we accept a slice of maps as an input... 
+ if d.config.WeaklyTypedInput { + switch dataVal.Kind() { + case reflect.Array, reflect.Slice: + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil + } + } + + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } + + // Accumulate errors + errors := make([]string, 0) + + for _, k := range dataVal.MapKeys() { + fieldName := fmt.Sprintf("%s[%s]", name, k) + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return err + } + + val.Set(realVal) + return nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + + // Make a new slice to hold our result, same size as the original data. 
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + if dataValKind != reflect.Map { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) + } + + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldKind := fieldType.Type.Kind() + + // If "squash" is specified in the tag, we squash the field down. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + } else { + structs = append(structs, val.FieldByName(fieldType.Name)) + } + continue + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + for fieldType, field := range fields { + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. 
+ for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. + continue + } + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !field.CanSet() { + continue + } + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + } + + if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { + errors = appendErrors(errors, err) + } + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = fmt.Sprintf("%s.%s", name, key) + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 00000000..f9c841a5 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
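Taken together, the mapstructure sources vendored above support the two-pass pattern its README describes: decode the raw input into a `map[string]interface{}`, inspect a discriminator key, then decode the same map into the concrete struct. Below is a minimal, self-contained sketch of that pattern; the `Person` type and the input literal are illustrative assumptions, not code from this repository.

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Person is a hypothetical target type for the README's example input.
type Person struct {
	Type string
	Name string
}

func main() {
	// In practice this map would come from json.Unmarshal or similar.
	input := map[string]interface{}{
		"type": "person",
		"name": "Mitchell",
	}

	// Peek at the discriminator field before committing to a struct type.
	if input["type"] == "person" {
		var p Person
		if err := mapstructure.Decode(input, &p); err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", p) // {Type:person Name:Mitchell}
	}
}
```

Note that `Decode` matches map keys to exported fields case-insensitively (the `decodeStruct` path above falls back to an `EqualFold` search), so the lowercase `"type"` and `"name"` keys land on `Type` and `Name` without any struct tags.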
diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 00000000..ac82cd2e --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 00000000..6a7f1761 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,19 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Array + ArrayElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 00000000..d3cfe854 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=Location location.go; DO NOT EDIT + +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 41, 52, 59} + +func (i Location) String() string { + if i+1 >= Location(len(_Location_index)) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 00000000..d7ab7b6d --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,401 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import ( + "errors" + "reflect" +) + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// InterfaceWalker implementations are able to handle interface values as they +// are encountered during the walk. +type InterfaceWalker interface { + Interface(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. +type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// ArrayWalker implementations are able to handle array elements found +// within complex structures. 
+type ArrayWalker interface { + Array(reflect.Value) error + ArrayElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. +type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) +type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// SkipEntry can be returned from walk functions to skip walking +// the value of this field. This is only valid in the following functions: +// +// - Struct: skips all fields from being walked +// - StructField: skips walking the struct value +// +var SkipEntry = errors.New("skip this entry") + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. +func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer. + // + // Check whether the value is then a pointer. If so, then set pointer + // to true to notify the user. + // + // If we still have a pointer or an interface after the indirections, then + // we unwrap another level + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. + pointer := false + pointerV := v + + for { + if pointerV.Kind() == reflect.Interface { + if iw, ok := w.(InterfaceWalker); ok { + if err = iw.Interface(pointerV); err != nil { + return + } + } + + pointerV = pointerV.Elem() + } + + if pointerV.Kind() == reflect.Ptr { + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func(pointer bool) { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }(pointer) + } + + if pointer { + pointerV = v + } + pointer = false + + // If we still have a pointer or interface we have to indirect another level. + switch pointerV.Kind() { + case reflect.Ptr, reflect.Interface: + continue + } + break + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. 
+ originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + case reflect.Array: + err = walkArray(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + if err := walk(kv, w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkArray(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Array) + } + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.Array(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.ArrayElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(ArrayElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(ArrayElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Array) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + skip := false + if sw, ok := w.(StructWalker); ok { + err = sw.Struct(v) + if err == SkipEntry { + skip = true + err = nil + } + if err != nil { + return + } + } + + if !skip { + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + } 
+ + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE new file mode 100644 index 00000000..488357b8 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013-2016 by Maxim Bublis + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md new file mode 100644 index 00000000..b6aad1c8 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/README.md @@ -0,0 +1,65 @@ +# UUID package for Go language + +[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid) +[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid) +[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid) + +This package provides pure Go implementation of Universally Unique Identifier (UUID). Supported both creation and parsing of UUIDs. + +With 100% test coverage and benchmarks out of box. + +Supported versions: +* Version 1, based on timestamp and MAC address (RFC 4122) +* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1) +* Version 3, based on MD5 hashing (RFC 4122) +* Version 4, based on random numbers (RFC 4122) +* Version 5, based on SHA-1 hashing (RFC 4122) + +## Installation + +Use the `go` command: + + $ go get github.com/satori/go.uuid + +## Requirements + +UUID package requires Go >= 1.2. + +## Example + +```go +package main + +import ( + "fmt" + "github.com/satori/go.uuid" +) + +func main() { + // Creating UUID Version 4 + u1 := uuid.NewV4() + fmt.Printf("UUIDv4: %s\n", u1) + + // Parsing UUID from string input + u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + if err != nil { + fmt.Printf("Something gone wrong: %s", err) + } + fmt.Printf("Successfully parsed: %s", u2) +} +``` + +## Documentation + +[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project. + +## Links +* [RFC 4122](http://tools.ietf.org/html/rfc4122) +* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) + +## Copyright + +Copyright (C) 2013-2016 by Maxim Bublis . + +UUID package released under MIT License. +See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details. 
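The reflectwalk package vendored earlier in this patch is driven by implementing one or more of its walker interfaces and handing the value plus the walker to `Walk`; `Walk` only invokes the callbacks a walker actually implements, so a walker opts into exactly the locations it cares about. A minimal sketch, assuming a hypothetical `printWalker` and `Config` type (neither is part of this repository):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

type printWalker struct{}

// Primitive is called for every primitive value (string, int, bool, ...).
func (printWalker) Primitive(v reflect.Value) error {
	fmt.Println("primitive:", v.Interface())
	return nil
}

// Struct is called once per struct before its fields are walked.
func (printWalker) Struct(v reflect.Value) error {
	fmt.Println("struct:", v.Type())
	return nil
}

// StructField is called for each field of a walked struct.
func (printWalker) StructField(sf reflect.StructField, v reflect.Value) error {
	fmt.Println("field:", sf.Name)
	return nil
}

func main() {
	type Config struct {
		Name  string
		Count int
	}
	// Visits the struct, then each field and its primitive value.
	if err := reflectwalk.Walk(Config{Name: "demo", Count: 2}, printWalker{}); err != nil {
		panic(err)
	}
}
```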
diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go new file mode 100644 index 00000000..9c7fbaa5 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/uuid.go @@ -0,0 +1,488 @@ +// Copyright (C) 2013-2015 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// Package uuid provides implementation of Universally Unique Identifier (UUID). +// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and +// version 2 (as specified in DCE 1.1). +package uuid + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "database/sql/driver" + "encoding/binary" + "encoding/hex" + "fmt" + "hash" + "net" + "os" + "sync" + "time" +) + +// UUID layout variants. +const ( + VariantNCS = iota + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// UUID DCE domains. +const ( + DomainPerson = iota + DomainGroup + DomainOrg +) + +// Difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). +const epochStart = 122192928000000000 + +// Used in string method conversion +const dash byte = '-' + +// UUID v1/v2 storage. +var ( + storageMutex sync.Mutex + storageOnce sync.Once + epochFunc = unixTimeFunc + clockSequence uint16 + lastTime uint64 + hardwareAddr [6]byte + posixUID = uint32(os.Getuid()) + posixGID = uint32(os.Getgid()) +) + +// String parse helpers. +var ( + urnPrefix = []byte("urn:uuid:") + byteGroups = []int{8, 4, 4, 4, 12} +) + +func initClockSequence() { + buf := make([]byte, 2) + safeRandom(buf) + clockSequence = binary.BigEndian.Uint16(buf) +} + +func initHardwareAddr() { + interfaces, err := net.Interfaces() + if err == nil { + for _, iface := range interfaces { + if len(iface.HardwareAddr) >= 6 { + copy(hardwareAddr[:], iface.HardwareAddr) + return + } + } + } + + // Initialize hardwareAddr randomly in case + // of real network interfaces absence + safeRandom(hardwareAddr[:]) + + // Set multicast bit as recommended in RFC 4122 + hardwareAddr[0] |= 0x01 +} + +func initStorage() { + initClockSequence() + initHardwareAddr() +} + +func safeRandom(dest []byte) { + if _, err := rand.Read(dest); err != nil { + panic(err) + } +} + +// Returns difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and current time. +// This is default epoch calculation function. 
+func unixTimeFunc() uint64 { + return epochStart + uint64(time.Now().UnixNano()/100) +} + +// UUID representation compliant with specification +// described in RFC 4122. +type UUID [16]byte + +// NullUUID can be used with the standard sql package to represent a +// UUID value that can be NULL in the database +type NullUUID struct { + UUID UUID + Valid bool +} + +// The nil UUID is special form of UUID that is specified to have all +// 128 bits set to zero. +var Nil = UUID{} + +// Predefined namespace UUIDs. +var ( + NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8") +) + +// And returns result of binary AND of two UUIDs. +func And(u1 UUID, u2 UUID) UUID { + u := UUID{} + for i := 0; i < 16; i++ { + u[i] = u1[i] & u2[i] + } + return u +} + +// Or returns result of binary OR of two UUIDs. +func Or(u1 UUID, u2 UUID) UUID { + u := UUID{} + for i := 0; i < 16; i++ { + u[i] = u1[i] | u2[i] + } + return u +} + +// Equal returns true if u1 and u2 equals, otherwise returns false. +func Equal(u1 UUID, u2 UUID) bool { + return bytes.Equal(u1[:], u2[:]) +} + +// Version returns algorithm version used to generate UUID. +func (u UUID) Version() uint { + return uint(u[6] >> 4) +} + +// Variant returns UUID layout variant. +func (u UUID) Variant() uint { + switch { + case (u[8] & 0x80) == 0x00: + return VariantNCS + case (u[8]&0xc0)|0x80 == 0x80: + return VariantRFC4122 + case (u[8]&0xe0)|0xc0 == 0xc0: + return VariantMicrosoft + } + return VariantFuture +} + +// Bytes returns bytes slice representation of UUID. +func (u UUID) Bytes() []byte { + return u[:] +} + +// Returns canonical string representation of UUID: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (u UUID) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = dash + hex.Encode(buf[9:13], u[4:6]) + buf[13] = dash + hex.Encode(buf[14:18], u[6:8]) + buf[18] = dash + hex.Encode(buf[19:23], u[8:10]) + buf[23] = dash + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +// SetVersion sets version bits. +func (u *UUID) SetVersion(v byte) { + u[6] = (u[6] & 0x0f) | (v << 4) +} + +// SetVariant sets variant bits as described in RFC 4122. +func (u *UUID) SetVariant() { + u[8] = (u[8] & 0xbf) | 0x80 +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by String. +func (u UUID) MarshalText() (text []byte, err error) { + text = []byte(u.String()) + return +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// The following formats are supported:
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+func (u *UUID) UnmarshalText(text []byte) (err error) {
+	if len(text) < 32 {
+		err = fmt.Errorf("uuid: UUID string too short: %s", text)
+		return
+	}
+
+	t := text[:]
+	braced := false
+
+	if bytes.Equal(t[:9], urnPrefix) {
+		t = t[9:]
+	} else if t[0] == '{' {
+		braced = true
+		t = t[1:]
+	}
+
+	b := u[:]
+
+	for i, byteGroup := range byteGroups {
+		if i > 0 && t[0] == '-' {
+			t = t[1:]
+		} else if i > 0 && t[0] != '-' {
+			err = fmt.Errorf("uuid: invalid string format")
+			return
+		}
+
+		// The first character of the third group is the version
+		// nibble and must be 0-5.
+		if i == 2 {
+			if !bytes.Contains([]byte("012345"), []byte{t[0]}) {
+				err = fmt.Errorf("uuid: invalid version number: %c", t[0])
+				return
+			}
+		}
+
+		if len(t) < byteGroup {
+			err = fmt.Errorf("uuid: UUID string too short: %s", text)
+			return
+		}
+
+		// Reject trailing characters after the last group, allowing
+		// at most a single closing '}' for braced input.
+		if i == 4 && len(t) > byteGroup &&
+			((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) {
+			err = fmt.Errorf("uuid: UUID string too long: %s", t)
+			return
+		}
+
+		_, err = hex.Decode(b[:byteGroup/2], t[:byteGroup])
+		if err != nil {
+			return
+		}
+
+		t = t[byteGroup:]
+		b = b[byteGroup/2:]
+	}
+
+	return
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (u UUID) MarshalBinary() (data []byte, err error) {
+	data = u.Bytes()
+	return
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// It will return an error if the slice isn't 16 bytes long.
+func (u *UUID) UnmarshalBinary(data []byte) (err error) {
+	if len(data) != 16 {
+		err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
+		return
+	}
+	copy(u[:], data)
+
+	return
+}
+
+// Value implements the driver.Valuer interface.
+func (u UUID) Value() (driver.Value, error) {
+	return u.String(), nil
+}
+
+// Scan implements the sql.Scanner interface.
+// A 16-byte slice is handled by UnmarshalBinary, while
+// a longer byte slice or a string is handled by UnmarshalText.
+func (u *UUID) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case []byte:
+		if len(src) == 16 {
+			return u.UnmarshalBinary(src)
+		}
+		return u.UnmarshalText(src)
+
+	case string:
+		return u.UnmarshalText([]byte(src))
+	}
+
+	return fmt.Errorf("uuid: cannot convert %T to UUID", src)
+}
+
+// Value implements the driver.Valuer interface.
+func (u NullUUID) Value() (driver.Value, error) {
+	if !u.Valid {
+		return nil, nil
+	}
+	// Delegate to the UUID Value function.
+	return u.UUID.Value()
+}
+
+// Scan implements the sql.Scanner interface.
+func (u *NullUUID) Scan(src interface{}) error {
+	if src == nil {
+		u.UUID, u.Valid = Nil, false
+		return nil
+	}
+
+	// Delegate to the UUID Scan function.
+	u.Valid = true
+	return u.UUID.Scan(src)
+}
+
+// FromBytes returns a UUID converted from a raw byte slice input.
+// It will return an error if the slice isn't 16 bytes long.
+func FromBytes(input []byte) (u UUID, err error) {
+	err = u.UnmarshalBinary(input)
+	return
+}
+
+// FromBytesOrNil returns a UUID converted from a raw byte slice input.
+// Same behavior as FromBytes, but returns a Nil UUID on error.
+func FromBytesOrNil(input []byte) UUID {
+	uuid, err := FromBytes(input)
+	if err != nil {
+		return Nil
+	}
+	return uuid
+}
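+
+// Illustrative sketch (not part of the original vendored source): the
+// *OrNil variants trade the error for a Nil fallback, which suits optional
+// inputs; userInput below is a hypothetical string variable:
+//
+//	id := FromStringOrNil(userInput)
+//	if Equal(id, Nil) {
+//		// handle the invalid input
+//	}
+
+// FromString returns a UUID parsed from a string input.
+// The input is expected in a form accepted by UnmarshalText.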
+func FromString(input string) (u UUID, err error) {
+	err = u.UnmarshalText([]byte(input))
+	return
+}
+
+// FromStringOrNil returns a UUID parsed from a string input.
+// Same behavior as FromString, but returns a Nil UUID on error.
+func FromStringOrNil(input string) UUID {
+	uuid, err := FromString(input)
+	if err != nil {
+		return Nil
+	}
+	return uuid
+}
+
+// getStorage returns the UUID v1/v2 storage state:
+// epoch timestamp, clock sequence, and hardware address.
+func getStorage() (uint64, uint16, []byte) {
+	storageOnce.Do(initStorage)
+
+	storageMutex.Lock()
+	defer storageMutex.Unlock()
+
+	timeNow := epochFunc()
+	// The clock was set backwards since the last UUID was generated;
+	// increment the clock sequence, as required by RFC 4122.
+	if timeNow <= lastTime {
+		clockSequence++
+	}
+	lastTime = timeNow
+
+	return timeNow, clockSequence, hardwareAddr[:]
+}
+
+// NewV1 returns a UUID based on the current timestamp and MAC address.
+func NewV1() UUID {
+	u := UUID{}
+
+	timeNow, clockSeq, hardwareAddr := getStorage()
+
+	binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
+	binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+	binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+	binary.BigEndian.PutUint16(u[8:], clockSeq)
+
+	copy(u[10:], hardwareAddr)
+
+	u.SetVersion(1)
+	u.SetVariant()
+
+	return u
+}
+
+// NewV2 returns a DCE Security UUID based on the POSIX UID/GID.
+func NewV2(domain byte) UUID {
+	u := UUID{}
+
+	timeNow, clockSeq, hardwareAddr := getStorage()
+
+	switch domain {
+	case DomainPerson:
+		binary.BigEndian.PutUint32(u[0:], posixUID)
+	case DomainGroup:
+		binary.BigEndian.PutUint32(u[0:], posixGID)
+	}
+
+	binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+	binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+	binary.BigEndian.PutUint16(u[8:], clockSeq)
+	u[9] = domain
+
+	copy(u[10:], hardwareAddr)
+
+	u.SetVersion(2)
+	u.SetVariant()
+
+	return u
+}
+
+// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
+func NewV3(ns UUID, name string) UUID {
+	u := newFromHash(md5.New(), ns, name)
+	u.SetVersion(3)
+	u.SetVariant()
+
+	return u
+}
+
+// NewV4 returns a randomly generated UUID.
+func NewV4() UUID {
+	u := UUID{}
+	safeRandom(u[:])
+	u.SetVersion(4)
+	u.SetVariant()
+
+	return u
+}
+
+// NewV5 returns a UUID based on the SHA-1 hash of the namespace UUID and name.
+func NewV5(ns UUID, name string) UUID {
+	u := newFromHash(sha1.New(), ns, name)
+	u.SetVersion(5)
+	u.SetVariant()
+
+	return u
+}
+
+// newFromHash returns a UUID based on the hash of the namespace UUID and name.
+func newFromHash(h hash.Hash, ns UUID, name string) UUID {
+	u := UUID{}
+	h.Write(ns[:])
+	h.Write([]byte(name))
+	copy(u[:], h.Sum(nil))
+
+	return u
+}
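+
+// Illustrative sketch (not part of the original vendored source): V3/V5
+// UUIDs are deterministic; the same namespace and name always hash to the
+// same UUID, which makes them suitable as stable, reproducible identifiers:
+//
+//	a := NewV5(NamespaceDNS, "example.com")
+//	b := NewV5(NamespaceDNS, "example.com")
+//	// Equal(a, b) == true

From 70050fe1af18f1eb14bf49d1510b7f60eb37c697 Mon Sep 17 00:00:00 2001
From: Anubhav Mishra
Date: Wed, 1 Nov 2017 15:22:37 -0700
Subject: [PATCH 2/2] hey

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index cc8a967f..f3afb1af 100644
--- a/README.md
+++ b/README.md
@@ -80,3 +80,4 @@ $ tfjson terraform.tfplan
 ## License
 
 This project is made available under the [MIT License](http://opensource.org/licenses/MIT).
+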