From 0d62eba17d360b619afae83d9457b55befceef8a Mon Sep 17 00:00:00 2001 From: Marques Johansson Date: Tue, 16 Jun 2020 14:52:49 -0400 Subject: [PATCH 01/11] wip - packetize --- OWNERS_ALIASES | 4 ++++ cmd/openshift-install/destroy.go | 1 + cmd/openshift-install/gather.go | 11 +++++++++++ data/data/packet/OWNERS | 7 +++++++ data/data/packet/bootstrap/README.md | 0 data/data/packet/bootstrap/main.tf | 0 data/data/packet/bootstrap/output.tf | 0 data/data/packet/bootstrap/variables.tf | 0 data/data/packet/bootstrap/versions.tf | 0 data/data/packet/dns/dns.tf | 0 data/data/packet/dns/variables.tf | 0 data/data/packet/dns/versions.tf | 0 data/data/packet/main.tf | 0 data/data/packet/master/main.tf | 0 data/data/packet/master/outputs.tf | 0 data/data/packet/master/variables.tf | 0 data/data/packet/master/versions.tf | 0 data/data/packet/variables-packet.tf | 0 data/data/packet/versions.tf | 0 data/data/packet/vnet/internal-lb.tf | 0 data/data/packet/vnet/outputs.tf | 0 data/data/packet/vnet/public-lb.tf | 0 data/data/packet/vnet/variables.tf | 0 data/data/packet/vnet/versions.tf | 0 data/data/packet/vnet/vnet.tf | 0 pkg/asset/cluster/packet/packet.go | 0 .../packet/mock/packetclient_generated.go | 0 pkg/asset/installconfig/packet/packet.go | 0 pkg/destroy/packet/packet.go | 0 pkg/terraform/exec/plugins/packet.go | 0 pkg/terraform/gather/packet/OWNERS | 7 +++++++ pkg/terraform/gather/packet/ip.go | 0 pkg/tfvars/packet/packet.go | 0 33 files changed, 30 insertions(+) create mode 100644 data/data/packet/OWNERS create mode 100644 data/data/packet/bootstrap/README.md create mode 100644 data/data/packet/bootstrap/main.tf create mode 100644 data/data/packet/bootstrap/output.tf create mode 100644 data/data/packet/bootstrap/variables.tf create mode 100644 data/data/packet/bootstrap/versions.tf create mode 100644 data/data/packet/dns/dns.tf create mode 100644 data/data/packet/dns/variables.tf create mode 100644 data/data/packet/dns/versions.tf create mode 100644 data/data/packet/main.tf create mode 100644 data/data/packet/master/main.tf create mode 100644 data/data/packet/master/outputs.tf create mode 100644 data/data/packet/master/variables.tf create mode 100644 data/data/packet/master/versions.tf create mode 100644 data/data/packet/variables-packet.tf create mode 100644 data/data/packet/versions.tf create mode 100644 data/data/packet/vnet/internal-lb.tf create mode 100644 data/data/packet/vnet/outputs.tf create mode 100644 data/data/packet/vnet/public-lb.tf create mode 100644 data/data/packet/vnet/variables.tf create mode 100644 data/data/packet/vnet/versions.tf create mode 100644 data/data/packet/vnet/vnet.tf create mode 100644 pkg/asset/cluster/packet/packet.go create mode 100644 pkg/asset/installconfig/packet/mock/packetclient_generated.go create mode 100644 pkg/asset/installconfig/packet/packet.go create mode 100644 pkg/destroy/packet/packet.go create mode 100644 pkg/terraform/exec/plugins/packet.go create mode 100644 pkg/terraform/gather/packet/OWNERS create mode 100644 pkg/terraform/gather/packet/ip.go create mode 100644 pkg/tfvars/packet/packet.go diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 8ac5b0acb12..856ed52d2bc 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -80,3 +80,7 @@ aliases: - Gal-Zaidman - gekorob - rgolangh + packet-approvers: + - displague + packet-reviewers: + - displague diff --git a/cmd/openshift-install/destroy.go b/cmd/openshift-install/destroy.go index e813e095a3c..ddfc430040c 100644 --- a/cmd/openshift-install/destroy.go +++ b/cmd/openshift-install/destroy.go @@ 
-18,6 +18,7 @@ import ( _ "github.com/openshift/installer/pkg/destroy/libvirt" _ "github.com/openshift/installer/pkg/destroy/openstack" _ "github.com/openshift/installer/pkg/destroy/ovirt" + _ "github.com/openshift/installer/pkg/destroy/packet" _ "github.com/openshift/installer/pkg/destroy/vsphere" timer "github.com/openshift/installer/pkg/metrics/timer" "github.com/openshift/installer/pkg/terraform" diff --git a/cmd/openshift-install/gather.go b/cmd/openshift-install/gather.go index e5f8456d051..7da0c6e2201 100644 --- a/cmd/openshift-install/gather.go +++ b/cmd/openshift-install/gather.go @@ -32,6 +32,7 @@ import ( gatherlibvirt "github.com/openshift/installer/pkg/terraform/gather/libvirt" gatheropenstack "github.com/openshift/installer/pkg/terraform/gather/openstack" gatherovirt "github.com/openshift/installer/pkg/terraform/gather/ovirt" + gatherpacket "github.com/openshift/installer/pkg/terraform/gather/packet" gathervsphere "github.com/openshift/installer/pkg/terraform/gather/vsphere" "github.com/openshift/installer/pkg/types" awstypes "github.com/openshift/installer/pkg/types/aws" @@ -41,6 +42,7 @@ import ( libvirttypes "github.com/openshift/installer/pkg/types/libvirt" openstacktypes "github.com/openshift/installer/pkg/types/openstack" ovirttypes "github.com/openshift/installer/pkg/types/ovirt" + packettypes "github.com/openshift/installer/pkg/types/packet" vspheretypes "github.com/openshift/installer/pkg/types/vsphere" ) @@ -228,6 +230,15 @@ func extractHostAddresses(config *types.InstallConfig, tfstate *terraform.State) return bootstrap, port, masters, err } masters, err = gatherovirt.ControlPlaneIPs(tfstate) + case packettypes.Name: + bootstrap, err = gatherpacket.BootstrapIP(tfstate) + if err != nil { + return bootstrap, port, masters, err + } + masters, err = gatherpacket.ControlPlaneIPs(tfstate) + if err != nil { + logrus.Error(err) + } case vspheretypes.Name: bootstrap, err = gathervsphere.BootstrapIP(config, tfstate) if err != nil { diff --git a/data/data/packet/OWNERS b/data/data/packet/OWNERS new file mode 100644 index 00000000000..8877fbf8d6d --- /dev/null +++ b/data/data/packet/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. 
+ +approvers: + - packet-approvers +reviewers: + - packet-reviewers diff --git a/data/data/packet/bootstrap/README.md b/data/data/packet/bootstrap/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/bootstrap/main.tf b/data/data/packet/bootstrap/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/bootstrap/output.tf b/data/data/packet/bootstrap/output.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/bootstrap/variables.tf b/data/data/packet/bootstrap/variables.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/bootstrap/versions.tf b/data/data/packet/bootstrap/versions.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/dns/dns.tf b/data/data/packet/dns/dns.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/dns/variables.tf b/data/data/packet/dns/variables.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/dns/versions.tf b/data/data/packet/dns/versions.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/main.tf b/data/data/packet/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/master/main.tf b/data/data/packet/master/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/master/outputs.tf b/data/data/packet/master/outputs.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/master/variables.tf b/data/data/packet/master/variables.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/master/versions.tf b/data/data/packet/master/versions.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/variables-packet.tf b/data/data/packet/variables-packet.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/versions.tf b/data/data/packet/versions.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/vnet/internal-lb.tf b/data/data/packet/vnet/internal-lb.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/vnet/outputs.tf b/data/data/packet/vnet/outputs.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/vnet/public-lb.tf b/data/data/packet/vnet/public-lb.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/vnet/variables.tf b/data/data/packet/vnet/variables.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/vnet/versions.tf b/data/data/packet/vnet/versions.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/data/data/packet/vnet/vnet.tf b/data/data/packet/vnet/vnet.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/asset/cluster/packet/packet.go b/pkg/asset/cluster/packet/packet.go new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/asset/installconfig/packet/mock/packetclient_generated.go b/pkg/asset/installconfig/packet/mock/packetclient_generated.go new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/asset/installconfig/packet/packet.go b/pkg/asset/installconfig/packet/packet.go new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/destroy/packet/packet.go b/pkg/destroy/packet/packet.go new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/pkg/terraform/exec/plugins/packet.go b/pkg/terraform/exec/plugins/packet.go new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/terraform/gather/packet/OWNERS b/pkg/terraform/gather/packet/OWNERS new file mode 100644 index 00000000000..8877fbf8d6d --- /dev/null +++ b/pkg/terraform/gather/packet/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +approvers: + - packet-approvers +reviewers: + - packet-reviewers diff --git a/pkg/terraform/gather/packet/ip.go b/pkg/terraform/gather/packet/ip.go new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/tfvars/packet/packet.go b/pkg/tfvars/packet/packet.go new file mode 100644 index 00000000000..e69de29bb2d From eb0f4a45bf5c0a1e835f9c82df874dd85ca9f676 Mon Sep 17 00:00:00 2001 From: Marques Johansson Date: Thu, 18 Jun 2020 17:05:41 -0400 Subject: [PATCH 02/11] stub out more files and interfaces Packet will need Signed-off-by: Marques Johansson --- README.md | 1 + docs/user/packet/OWNERS | 7 + docs/user/packet/README.md | 7 + docs/user/packet/install_ipi.md | 13 + go.mod | 2 + go.sum | 27 +- pkg/asset/installconfig/packet/client.go | 31 + pkg/asset/installconfig/packet/config.go | 73 + pkg/asset/installconfig/packet/packet.go | 38 + pkg/destroy/packet/OWNERS | 7 + pkg/destroy/packet/doc.go | 2 + pkg/destroy/packet/packet.go | 39 + pkg/terraform/exec/plugins/packet.go | 15 + pkg/terraform/gather/packet/ip.go | 18 + pkg/types/packet/OWNERS | 7 + pkg/types/packet/defaults/platform.go | 7 + pkg/types/packet/defaults/platform_test.go | 1 + pkg/types/packet/doc.go | 6 + pkg/types/packet/platform.go | 5 + pkg/types/packet/validation/machinepool.go | 1 + .../packet/validation/machinepool_test.go | 1 + pkg/types/packet/validation/platform.go | 1 + pkg/types/packet/validation/platform_test.go | 1 + .../hashicorp/terraform/flatmap/expand.go | 152 ++ .../hashicorp/terraform/flatmap/flatten.go | 71 + .../hashicorp/terraform/flatmap/map.go | 82 + .../terraform/helper/config/decode.go | 28 + .../terraform/helper/config/validator.go | 214 +++ .../terraform/helper/resource/error.go | 79 + .../helper/resource/grpc_test_provider.go | 43 + .../hashicorp/terraform/helper/resource/id.go | 45 + .../terraform/helper/resource/map.go | 140 ++ .../terraform/helper/resource/resource.go | 49 + .../terraform/helper/resource/state.go | 259 ++++ .../terraform/helper/resource/state_shim.go | 188 +++ .../terraform/helper/resource/testing.go | 1320 +++++++++++++++++ .../helper/resource/testing_config.go | 404 +++++ .../helper/resource/testing_import_state.go | 232 +++ .../terraform/helper/resource/wait.go | 84 ++ .../github.com/packethost/packngo/.drone.yml | 28 + .../github.com/packethost/packngo/.gitignore | 29 + .../packethost/packngo/CHANGELOG.md | 54 + .../github.com/packethost/packngo/LICENSE.txt | 56 + .../github.com/packethost/packngo/README.md | 127 ++ .../github.com/packethost/packngo/batches.go | 97 ++ .../packethost/packngo/bgp_configs.go | 81 + .../packethost/packngo/bgp_sessions.go | 72 + .../packethost/packngo/billing_address.go | 7 + .../packethost/packngo/capacities.go | 79 + .../github.com/packethost/packngo/connect.go | 148 ++ .../github.com/packethost/packngo/devices.go | 344 +++++ vendor/github.com/packethost/packngo/email.go | 87 ++ .../github.com/packethost/packngo/events.go | 104 ++ .../packethost/packngo/facilities.go | 56 + vendor/github.com/packethost/packngo/go.mod | 7 + 
vendor/github.com/packethost/packngo/go.sum | 11 + .../packngo/hardware_reservations.go | 99 ++ vendor/github.com/packethost/packngo/ip.go | 198 +++ .../packethost/packngo/notifications.go | 95 ++ .../packethost/packngo/operatingsystems.go | 42 + .../packethost/packngo/organizations.go | 171 +++ .../github.com/packethost/packngo/packngo.go | 394 +++++ .../packethost/packngo/payment_methods.go | 72 + vendor/github.com/packethost/packngo/plans.go | 126 ++ vendor/github.com/packethost/packngo/ports.go | 321 ++++ .../github.com/packethost/packngo/projects.go | 174 +++ vendor/github.com/packethost/packngo/rate.go | 12 + .../packethost/packngo/spotmarket.go | 39 + .../packethost/packngo/spotmarketrequest.go | 114 ++ .../github.com/packethost/packngo/sshkeys.go | 139 ++ .../packethost/packngo/timestamp.go | 35 + .../packethost/packngo/two_factor_auth.go | 56 + vendor/github.com/packethost/packngo/user.go | 100 ++ vendor/github.com/packethost/packngo/utils.go | 115 ++ .../packethost/packngo/virtualnetworks.go | 92 ++ .../github.com/packethost/packngo/volumes.go | 238 +++ vendor/github.com/packethost/packngo/vpn.go | 50 + .../terraform-provider-packet/LICENSE | 373 +++++ .../packet/config.go | 22 + .../datasource_packet_operating_system.go | 130 ++ .../datasource_packet_precreated_ip_block.go | 96 ++ .../datasource_packet_spot_market_price.go | 54 + .../packet/errors.go | 49 + .../packet/provider.go | 60 + .../packet/resource_packet_bgp_session.go | 98 ++ .../packet/resource_packet_connect.go | 157 ++ .../packet/resource_packet_device.go | 652 ++++++++ .../packet/resource_packet_ip_attachment.go | 102 ++ .../packet/resource_packet_organization.go | 167 +++ .../resource_packet_port_vlan_attachment.go | 171 +++ .../packet/resource_packet_project.go | 281 ++++ .../packet/resource_packet_project_ssh_key.go | 54 + .../resource_packet_reserved_ip_block.go | 234 +++ .../resource_packet_spot_market_request.go | 311 ++++ .../packet/resource_packet_ssh_key.go | 135 ++ .../packet/resource_packet_vlan.go | 84 ++ .../packet/resource_packet_volume.go | 312 ++++ .../resource_packet_volume_attachment.go | 83 ++ .../terraform-provider-packet/packet/utils.go | 21 + vendor/modules.txt | 9 + 100 files changed, 11416 insertions(+), 8 deletions(-) create mode 100644 docs/user/packet/OWNERS create mode 100644 docs/user/packet/README.md create mode 100644 docs/user/packet/install_ipi.md create mode 100644 pkg/asset/installconfig/packet/client.go create mode 100644 pkg/asset/installconfig/packet/config.go create mode 100644 pkg/destroy/packet/OWNERS create mode 100644 pkg/destroy/packet/doc.go create mode 100644 pkg/types/packet/OWNERS create mode 100644 pkg/types/packet/defaults/platform.go create mode 100644 pkg/types/packet/defaults/platform_test.go create mode 100644 pkg/types/packet/doc.go create mode 100644 pkg/types/packet/platform.go create mode 100644 pkg/types/packet/validation/machinepool.go create mode 100644 pkg/types/packet/validation/machinepool_test.go create mode 100644 pkg/types/packet/validation/platform.go create mode 100644 pkg/types/packet/validation/platform_test.go create mode 100644 vendor/github.com/hashicorp/terraform/flatmap/expand.go create mode 100644 vendor/github.com/hashicorp/terraform/flatmap/flatten.go create mode 100644 vendor/github.com/hashicorp/terraform/flatmap/map.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/config/decode.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/config/validator.go create mode 100644 
vendor/github.com/hashicorp/terraform/helper/resource/error.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/id.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/map.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/resource.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/state.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/testing.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/wait.go create mode 100644 vendor/github.com/packethost/packngo/.drone.yml create mode 100644 vendor/github.com/packethost/packngo/.gitignore create mode 100644 vendor/github.com/packethost/packngo/CHANGELOG.md create mode 100644 vendor/github.com/packethost/packngo/LICENSE.txt create mode 100644 vendor/github.com/packethost/packngo/README.md create mode 100644 vendor/github.com/packethost/packngo/batches.go create mode 100644 vendor/github.com/packethost/packngo/bgp_configs.go create mode 100644 vendor/github.com/packethost/packngo/bgp_sessions.go create mode 100644 vendor/github.com/packethost/packngo/billing_address.go create mode 100644 vendor/github.com/packethost/packngo/capacities.go create mode 100644 vendor/github.com/packethost/packngo/connect.go create mode 100644 vendor/github.com/packethost/packngo/devices.go create mode 100644 vendor/github.com/packethost/packngo/email.go create mode 100644 vendor/github.com/packethost/packngo/events.go create mode 100644 vendor/github.com/packethost/packngo/facilities.go create mode 100644 vendor/github.com/packethost/packngo/go.mod create mode 100644 vendor/github.com/packethost/packngo/go.sum create mode 100644 vendor/github.com/packethost/packngo/hardware_reservations.go create mode 100644 vendor/github.com/packethost/packngo/ip.go create mode 100644 vendor/github.com/packethost/packngo/notifications.go create mode 100644 vendor/github.com/packethost/packngo/operatingsystems.go create mode 100644 vendor/github.com/packethost/packngo/organizations.go create mode 100644 vendor/github.com/packethost/packngo/packngo.go create mode 100644 vendor/github.com/packethost/packngo/payment_methods.go create mode 100644 vendor/github.com/packethost/packngo/plans.go create mode 100644 vendor/github.com/packethost/packngo/ports.go create mode 100644 vendor/github.com/packethost/packngo/projects.go create mode 100644 vendor/github.com/packethost/packngo/rate.go create mode 100644 vendor/github.com/packethost/packngo/spotmarket.go create mode 100644 vendor/github.com/packethost/packngo/spotmarketrequest.go create mode 100644 vendor/github.com/packethost/packngo/sshkeys.go create mode 100644 vendor/github.com/packethost/packngo/timestamp.go create mode 100644 vendor/github.com/packethost/packngo/two_factor_auth.go create mode 100644 vendor/github.com/packethost/packngo/user.go create mode 100644 vendor/github.com/packethost/packngo/utils.go create mode 100644 vendor/github.com/packethost/packngo/virtualnetworks.go create mode 100644 vendor/github.com/packethost/packngo/volumes.go create mode 100644 vendor/github.com/packethost/packngo/vpn.go create mode 
100644 vendor/github.com/terraform-providers/terraform-provider-packet/LICENSE create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/config.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_operating_system.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_precreated_ip_block.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_spot_market_price.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/errors.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/provider.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_bgp_session.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_connect.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_device.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ip_attachment.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_organization.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_port_vlan_attachment.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project_ssh_key.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_reserved_ip_block.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_spot_market_request.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ssh_key.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_vlan.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume_attachment.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/utils.go diff --git a/README.md b/README.md index 22c4a02413d..40ea86fed8a 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,7 @@ * [OpenStack](docs/user/openstack/README.md) * [OpenStack (UPI) (Experimental)](docs/user/openstack/install_upi.md) * [oVirt](docs/user/ovirt/install_ipi.md) +* [Packet](docs/user/packet/install_ipi.md) * [vSphere](docs/user/vsphere/README.md) * [vSphere (UPI)](docs/user/vsphere/install_upi.md) diff --git a/docs/user/packet/OWNERS b/docs/user/packet/OWNERS new file mode 100644 index 00000000000..8877fbf8d6d --- /dev/null +++ b/docs/user/packet/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. 
+ +approvers: + - packet-approvers +reviewers: + - packet-reviewers diff --git a/docs/user/packet/README.md b/docs/user/packet/README.md new file mode 100644 index 00000000000..7b5803f4deb --- /dev/null +++ b/docs/user/packet/README.md @@ -0,0 +1,7 @@ +# Packet Project Setup + +This document is a guide for preparing a new Packet project for use with OpenShift. + +Follow along with the steps and links below to configure your Packet project and provision an OpenShift cluster: + +1. [Step 1](step1.md) \ No newline at end of file diff --git a/docs/user/packet/install_ipi.md b/docs/user/packet/install_ipi.md new file mode 100644 index 00000000000..04dfca0bc47 --- /dev/null +++ b/docs/user/packet/install_ipi.md @@ -0,0 +1,13 @@ +# Install using Packet platform provider + +## Overview + +## Prerequisite + +## Minimum resources + +## Install + +### Install using the wizard + +### Install in stages when customization is needed diff --git a/go.mod b/go.mod index cc030934613..59e4ceaed3c 100644 --- a/go.mod +++ b/go.mod @@ -62,6 +62,7 @@ require ( github.com/ovirt/go-ovirt v0.0.0-20200613023950-320a86f9df27 github.com/ovirt/terraform-provider-ovirt v0.4.3-0.20200406133650-74a154c1d861 github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db // indirect + github.com/packethost/packngo v0.2.0 github.com/pborman/uuid v1.2.0 github.com/pierrec/lz4 v2.3.0+incompatible // indirect github.com/pkg/errors v0.9.1 @@ -79,6 +80,7 @@ require ( github.com/terraform-providers/terraform-provider-ignition v1.2.1 github.com/terraform-providers/terraform-provider-local v1.4.0 github.com/terraform-providers/terraform-provider-openstack v1.28.0 + github.com/terraform-providers/terraform-provider-packet v1.7.2 github.com/terraform-providers/terraform-provider-random v1.3.2-0.20190925210718-83518d96ae4f github.com/terraform-providers/terraform-provider-vsphere v1.16.2 github.com/ulikunitz/xz v0.5.6 diff --git a/go.sum b/go.sum index 1c7cd49525a..a2bf84e2289 100644 --- a/go.sum +++ b/go.sum @@ -279,6 +279,7 @@ github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQ github.com/brancz/gojsontoyaml v0.0.0-20190425155809-e8bd32d46b3d/go.mod h1:IyUJYN1gvWjtLF5ZuygmxbnsAyP3aJS6cHzIuZY50B0= github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f/go.mod h1:HQhVmdUf7dBNwIIdBTivnCDxcf6IZY3/zrb+uKSJz6Y= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bsm/go-vlq v0.0.0-20150828105119-ec6e8d4f5f4e/go.mod h1:N+BjUcTjSxc2mtRGSCPsat1kze3CUtvJN3/jTXlp29k= github.com/btubbs/datetime v0.1.0/go.mod h1:n2BZ/2ltnRzNiz27aE3wUb2onNttQdC+WFxAoks5jJM= github.com/btubbs/datetime v0.1.1 h1:KuV+F9tyq/hEnezmKZNGk8dzqMVsId6EpFVrQCfA3To= github.com/btubbs/datetime v0.1.1/go.mod h1:n2BZ/2ltnRzNiz27aE3wUb2onNttQdC+WFxAoks5jJM= @@ -842,17 +843,12 @@ github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbG github.com/gophercloud/gophercloud v0.6.1-0.20191025185032-6ad562af8c1f/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= github.com/gophercloud/gophercloud v0.6.1-0.20191122030953-d8ac278c1c9d/go.mod h1:ozGNgr9KYOVATV5jsgHl/ceCDXGuguqOZAzoQ/2vcNM= github.com/gophercloud/gophercloud v0.7.1-0.20191210042042-7aa2e52d21f9/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= -github.com/gophercloud/gophercloud v0.7.1-0.20191211202411-f940f50ff1f7/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= -github.com/gophercloud/gophercloud v0.8.0 h1:1ylFFLRx7otpfRPSuOm77q8HLSlSOwYCGDeXmXJhX7A= 
-github.com/gophercloud/gophercloud v0.8.0/go.mod h1:Kc/QKr9thLKruO/dG0szY8kRIYS+iENz0ziI0hJf76A= github.com/gophercloud/gophercloud v0.10.1-0.20200424014253-c3bfe50899e5/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/gophercloud v0.11.0 h1:pYMP9UZBdQa3lsfIZ1tZor4EbtxiuB6BHhocenkiH/E= github.com/gophercloud/gophercloud v0.11.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/utils v0.0.0-20190124231947-9c3b9f2457ef/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw= github.com/gophercloud/utils v0.0.0-20190313033024-0bcc8e728cb5/go.mod h1:SZ9FTKibIotDtCrxAU/evccoyu1yhKST6hgBvwTB5Eg= github.com/gophercloud/utils v0.0.0-20191129022341-463e26ffa30d/go.mod h1:SZ9FTKibIotDtCrxAU/evccoyu1yhKST6hgBvwTB5Eg= -github.com/gophercloud/utils v0.0.0-20191212191830-4533a07bd492 h1:NAwq2GgRiqbNLw1cA7KUdt7lDR/NzJtk4EXGxO3gqas= -github.com/gophercloud/utils v0.0.0-20191212191830-4533a07bd492/go.mod h1:SZ9FTKibIotDtCrxAU/evccoyu1yhKST6hgBvwTB5Eg= github.com/gophercloud/utils v0.0.0-20200423144003-7c72efc7435d/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= github.com/gophercloud/utils v0.0.0-20200508015959-b0167b94122c h1:iawx2ojEQA7c+GmkaVO5sN+k8YONibXyDO8RlsC+1bs= github.com/gophercloud/utils v0.0.0-20200508015959-b0167b94122c/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= @@ -872,7 +868,6 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= @@ -908,6 +903,7 @@ github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoP github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-azure-helpers v0.4.1/go.mod h1:lu62V//auUow6k0IykxLK2DCNW8qTmpm8KqhYVWattA= @@ -919,10 +915,12 @@ github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVo github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMCsmBl4Omu/2t4= github.com/hashicorp/go-gcp-common v0.5.0/go.mod h1:IDGUI2N/OS3PiU4qZcXJeWKPI6O/9Y8hOrbSiMcqyYw= +github.com/hashicorp/go-getter v1.1.0/go.mod h1:q+PoBhh16brIKwJS9kt18jEtXHTg2EGkmrA9P7HVS+U= github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= 
github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02 h1:l1KB3bHVdvegcIf5upQ5mjcHjs2qsWnKh4Yr9xgIuu8= github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.7.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= @@ -934,10 +932,12 @@ github.com/hashicorp/go-memdb v1.0.2/go.mod h1:I6dKdmYhZqU0RJSheVEWgTNWdVQH5QvTg github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-plugin v0.0.0-20190220160451-3f118e8ee104/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE= github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= @@ -986,6 +986,7 @@ github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV github.com/hashicorp/hcl/v2 v2.1.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE= github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= +github.com/hashicorp/hcl2 v0.0.0-20190226234159-7e26f2f34612/go.mod h1:HtEzazM5AZ9fviNEof8QZB4T1Vz9UhHrGhnMPzl//Ek= github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= github.com/hashicorp/hil v0.0.0-20190212132231-97b3a9cdfa93 h1:T1Q6ag9tCwun16AW+XK3tAql24P4uTGUMIn1/92WsQQ= github.com/hashicorp/hil v0.0.0-20190212132231-97b3a9cdfa93/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= @@ -1517,6 +1518,9 @@ github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOTh github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db h1:9uViuKtx1jrlXLBW/pMnhOfzn3iSEdLase/But/IZRU= github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= +github.com/packethost/packngo v0.1.1-0.20190410075950-a02c426e4888/go.mod h1:RQHg5xR1F614BwJyepfMqrKN+32IH0i7yX+ey43rEeQ= +github.com/packethost/packngo v0.2.0 h1:mSlzOof8PsOWCy78sBMt/PwMJTEjjQ/rRvMixu4Nm6c= 
+github.com/packethost/packngo v0.2.0/go.mod h1:RQHg5xR1F614BwJyepfMqrKN+32IH0i7yX+ey43rEeQ= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= @@ -1786,10 +1790,10 @@ github.com/terraform-providers/terraform-provider-local v1.4.0 h1:n0CNTMjBfCC5R6 github.com/terraform-providers/terraform-provider-local v1.4.0/go.mod h1:nbnWkAjiiG0FHlsfYYMRfBwvDbo6eLjorQs/mmRGi14= github.com/terraform-providers/terraform-provider-null v1.0.1-0.20191204185112-e5c592237f62 h1:vi12lh3hnvMclOqFwEf73s8JCUTPTchOhEmf/7uMPn0= github.com/terraform-providers/terraform-provider-null v1.0.1-0.20191204185112-e5c592237f62/go.mod h1:RaAgicYv+oKLyZpaQB5BOkwL/t5WKYHQ+Q0kgMgXgR4= -github.com/terraform-providers/terraform-provider-openstack v1.25.0 h1:cUGi9qbg/6B7lv4+HyrarINEtVMSIouEuEuVpOE/0zY= -github.com/terraform-providers/terraform-provider-openstack v1.25.0/go.mod h1:cTefkvjGdhORynFvVsB2beWN+XmGhjAm3RBrnEJLvfs= github.com/terraform-providers/terraform-provider-openstack v1.28.0 h1:yiT3Z5fDkJt0YX5BDkX/+0uwGpX/uNjVsuYqFIJ/kL0= github.com/terraform-providers/terraform-provider-openstack v1.28.0/go.mod h1:MxR5egxGj9OfPTj0VorSjpIVAi3OT24jOMiCBH/d7hU= +github.com/terraform-providers/terraform-provider-packet v1.7.2 h1:hYN7YsuR9dp4P/MPRRnh5m5J1/tw53BbXiLDlsDTWw4= +github.com/terraform-providers/terraform-provider-packet v1.7.2/go.mod h1:/k5o0Y30me0844mFLk5hM0TnP7OM3v1FC1hu7ZTTrNM= github.com/terraform-providers/terraform-provider-random v0.0.0-20190925200408-30dac3233094/go.mod h1:F4KE9YftuJyMiBth4W1kCrsyOHndtTjAmZ+ZzjqWY+4= github.com/terraform-providers/terraform-provider-random v1.3.2-0.20190925210718-83518d96ae4f h1:oqZwtMD9/XcOcCzm/9cz8+pQWRTGF60N1RNcYLg+BCw= github.com/terraform-providers/terraform-provider-random v1.3.2-0.20190925210718-83518d96ae4f/go.mod h1:F4KE9YftuJyMiBth4W1kCrsyOHndtTjAmZ+ZzjqWY+4= @@ -1890,6 +1894,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zclconf/go-cty v0.0.0-20190124225737-a385d646c1e9/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty v0.0.0-20190212192503-19dda139b164/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.1.1/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= @@ -1954,6 +1960,7 @@ golang.org/x/build v0.0.0-20190927031335-2835ba2e683f/go.mod h1:fYw7AShPAhGMdXqA golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180816225734-aabede6cba87/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1965,6 +1972,7 @@ golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190228050851-31a38585487a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -2051,11 +2059,13 @@ golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181129055619-fae4c4e3ad76/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190301231341-16b79f2e4e95/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -2130,6 +2140,7 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190122071731-054c452bb702/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/pkg/asset/installconfig/packet/client.go b/pkg/asset/installconfig/packet/client.go new file mode 100644 index 00000000000..01a6ff9c62d --- /dev/null +++ b/pkg/asset/installconfig/packet/client.go @@ -0,0 +1,31 @@ +package packet + +import ( + packngo "github.com/packethost/packngo" + "github.com/pkg/errors" +) + +// getConnection is a convenience method to get a connection to the Packet API +// from a Config object. +func getConnection(_ Config) (*packngo.Client, error) { + // TODO(displague) NewClientWith... + con, err := packngo.NewClient() + if err != nil { + return nil, err + } + return con, nil +} + +// NewConnection returns a new client connection to Packet's API endpoint. +// It is the responsibility of the caller to close the connection. +func NewConnection() (*packngo.Client, error) { + packetConfig, err := NewConfig() + if err != nil { + return nil, errors.Wrap(err, "getting Packet configuration") + } + con, err := getConnection(packetConfig) + if err != nil { + return nil, errors.Wrap(err, "establishing Packet connection") + } + return con, nil +} diff --git a/pkg/asset/installconfig/packet/config.go b/pkg/asset/installconfig/packet/config.go new file mode 100644 index 00000000000..e91035673a8 --- /dev/null +++ b/pkg/asset/installconfig/packet/config.go @@ -0,0 +1,73 @@ +package packet + +import ( + "io/ioutil" + "os" + "path/filepath" + + "gopkg.in/yaml.v2" +) + +var defaultPacketConfigEnvVar = "PACKET_CONFIG" + +// TODO(displague) what is the preferred config for Packet projects? support +// both yaml and json? +var defaultPacketConfigPath = filepath.Join(os.Getenv("HOME"), "packet-config.yaml") + +// Config holds Packet API access details +type Config struct { +} + +// LoadPacketConfig loads the Packet configuration from the following locations (first wins): +// 1. PACKET_CONFIG env variable +// 2. $defaultPacketConfigPath +// See @Config for the expected format +func LoadPacketConfig() ([]byte, error) { + data, err := ioutil.ReadFile(discoverPath()) + if err != nil { + return nil, err + } + return data, nil +} + +// NewConfig will return a Config by loading +// the configuration from locations specified in @LoadPacketConfig
func NewConfig() (Config, error) { + c := Config{} + in, err := LoadPacketConfig() + if err != nil { + return c, err + } + + err = yaml.Unmarshal(in, &c) + if err != nil { + return c, err + } + + return c, nil +} + +func discoverPath() string { + path, _ := os.LookupEnv(defaultPacketConfigEnvVar) + if path != "" { + return path + } + + return defaultPacketConfigPath +} + +// Save will serialize the config back into the locations +// specified in @LoadPacketConfig; the first location with a file wins. +func (c *Config) Save() error { + out, err := yaml.Marshal(c) + if err != nil { + return err + } + + path := discoverPath() + err = os.MkdirAll(filepath.Dir(path), 0700) + if err != nil { + return err + } + return ioutil.WriteFile(path, out, 0600) +} diff --git a/pkg/asset/installconfig/packet/packet.go b/pkg/asset/installconfig/packet/packet.go index e69de29bb2d..95c175f6815 100644 --- a/pkg/asset/installconfig/packet/packet.go +++ b/pkg/asset/installconfig/packet/packet.go @@ -0,0 +1,38 @@ +// Package packet collects packet-specific configuration.
+package packet + +import ( + survey "gopkg.in/AlecAivazis/survey.v1" + + "github.com/openshift/installer/pkg/types/packet" + packetdefaults "github.com/openshift/installer/pkg/types/packet/defaults" + "github.com/openshift/installer/pkg/validate" +) + +// Platform collects packet-specific configuration. +func Platform() (*packet.Platform, error) { + var uri string + err := survey.Ask([]*survey.Question{ + // TODO(displague) ask the right questions + { + Prompt: &survey.Input{ + Message: "Packet Connection URI", + Help: "The Packet connection URI to be used. This must be accessible from the running cluster.", + Default: packetdefaults.DefaultURI, + }, + Validate: survey.ComposeValidators(survey.Required, uriValidator), + }, + }, &uri) + if err != nil { + return nil, err + } + + return &packet.Platform{}, nil + // TODO(displague) fill in the params +} + +// uriValidator validates that the answer provided in the prompt is a valid +// URL with a non-empty scheme. +func uriValidator(ans interface{}) error { + return validate.URI(ans.(string)) +} diff --git a/pkg/destroy/packet/OWNERS b/pkg/destroy/packet/OWNERS new file mode 100644 index 00000000000..8877fbf8d6d --- /dev/null +++ b/pkg/destroy/packet/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +approvers: + - packet-approvers +reviewers: + - packet-reviewers diff --git a/pkg/destroy/packet/doc.go b/pkg/destroy/packet/doc.go new file mode 100644 index 00000000000..19d0c21a0bd --- /dev/null +++ b/pkg/destroy/packet/doc.go @@ -0,0 +1,2 @@ +// Package packet provides a cluster-destroyer for Packet clusters. +package packet diff --git a/pkg/destroy/packet/packet.go b/pkg/destroy/packet/packet.go index e69de29bb2d..aad45aeb369 100644 --- a/pkg/destroy/packet/packet.go +++ b/pkg/destroy/packet/packet.go @@ -0,0 +1,39 @@ +package packet + +import ( + "fmt" + + "github.com/sirupsen/logrus" + + "github.com/openshift/installer/pkg/asset/installconfig/packet" + "github.com/openshift/installer/pkg/destroy/providers" + "github.com/openshift/installer/pkg/types" +) + +// ClusterUninstaller holds the various options for the cluster we want to delete. +type ClusterUninstaller struct { + Metadata types.ClusterMetadata + Logger logrus.FieldLogger +} + +// Run is the entrypoint to start the uninstall process. +func (uninstaller *ClusterUninstaller) Run() error { + _, err := packet.NewConnection() + if err != nil { + return fmt.Errorf("failed to initialize connection to the Packet API: %s", err) + } + // @TODO(displague) delete each thing + //if err := uninstaller.deleteThing(con); err != nil { + // uninstaller.Logger.Errorf("Failed to remove Thing: %s", err) + // } + + return nil +} + +// New returns a Packet Uninstaller from ClusterMetadata.
+func New(logger logrus.FieldLogger, metadata *types.ClusterMetadata) (providers.Destroyer, error) { + return &ClusterUninstaller{ + Metadata: *metadata, + Logger: logger, + }, nil +} diff --git a/pkg/terraform/exec/plugins/packet.go b/pkg/terraform/exec/plugins/packet.go index e69de29bb2d..3f1bfcb5d94 100644 --- a/pkg/terraform/exec/plugins/packet.go +++ b/pkg/terraform/exec/plugins/packet.go @@ -0,0 +1,15 @@ +package plugins + +import ( + "github.com/hashicorp/terraform-plugin-sdk/plugin" + "github.com/terraform-providers/terraform-provider-packet/packet" +) + +func init() { + exec := func() { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: packet.Provider, + }) + } + KnownPlugins["terraform-provider-packet"] = exec +} diff --git a/pkg/terraform/gather/packet/ip.go b/pkg/terraform/gather/packet/ip.go index e69de29bb2d..9cf29af89fe 100644 --- a/pkg/terraform/gather/packet/ip.go +++ b/pkg/terraform/gather/packet/ip.go @@ -0,0 +1,18 @@ +// Package packet supplies utilities to extract information from terraform state +package packet + +import ( + "github.com/openshift/installer/pkg/terraform" +) + +// BootstrapIP returns the IP address of the bootstrap host. +// TODO(displague) implement +func BootstrapIP(tfs *terraform.State) (string, error) { + return "", nil +} + +// ControlPlaneIPs returns the IP addresses of the control plane hosts. +// TODO(displague) implement +func ControlPlaneIPs(tfs *terraform.State) ([]string, error) { + return []string{""}, nil +} diff --git a/pkg/types/packet/OWNERS b/pkg/types/packet/OWNERS new file mode 100644 index 00000000000..8877fbf8d6d --- /dev/null +++ b/pkg/types/packet/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +approvers: + - packet-approvers +reviewers: + - packet-reviewers diff --git a/pkg/types/packet/defaults/platform.go b/pkg/types/packet/defaults/platform.go new file mode 100644 index 00000000000..d3fca4c1478 --- /dev/null +++ b/pkg/types/packet/defaults/platform.go @@ -0,0 +1,7 @@ +package defaults + +// Defaults for the Packet platform. +const ( + // TODO(displague) what API? + DefaultURI = "https://api.packet.com" +) \ No newline at end of file diff --git a/pkg/types/packet/defaults/platform_test.go b/pkg/types/packet/defaults/platform_test.go new file mode 100644 index 00000000000..20c56eff6a9 --- /dev/null +++ b/pkg/types/packet/defaults/platform_test.go @@ -0,0 +1 @@ +package defaults diff --git a/pkg/types/packet/doc.go b/pkg/types/packet/doc.go new file mode 100644 index 00000000000..8fefb29d414 --- /dev/null +++ b/pkg/types/packet/doc.go @@ -0,0 +1,6 @@ +// Package packet contains packet-specific structures for +// installer configuration and management. +package packet + +// Name is the name for the packet platform.
+const Name string = "packet" diff --git a/pkg/types/packet/platform.go b/pkg/types/packet/platform.go new file mode 100644 index 00000000000..4a41e9d482a --- /dev/null +++ b/pkg/types/packet/platform.go @@ -0,0 +1,5 @@ +package packet + +type Platform struct { + // TODO(displague) properties of the platform, token, project, image, network, etc +} diff --git a/pkg/types/packet/validation/machinepool.go b/pkg/types/packet/validation/machinepool.go new file mode 100644 index 00000000000..958ae1a6226 --- /dev/null +++ b/pkg/types/packet/validation/machinepool.go @@ -0,0 +1 @@ +package validation diff --git a/pkg/types/packet/validation/machinepool_test.go b/pkg/types/packet/validation/machinepool_test.go new file mode 100644 index 00000000000..958ae1a6226 --- /dev/null +++ b/pkg/types/packet/validation/machinepool_test.go @@ -0,0 +1 @@ +package validation diff --git a/pkg/types/packet/validation/platform.go b/pkg/types/packet/validation/platform.go new file mode 100644 index 00000000000..958ae1a6226 --- /dev/null +++ b/pkg/types/packet/validation/platform.go @@ -0,0 +1 @@ +package validation diff --git a/pkg/types/packet/validation/platform_test.go b/pkg/types/packet/validation/platform_test.go new file mode 100644 index 00000000000..5a3c8f3a974 --- /dev/null +++ b/pkg/types/packet/validation/platform_test.go @@ -0,0 +1 @@ +package validation \ No newline at end of file diff --git a/vendor/github.com/hashicorp/terraform/flatmap/expand.go b/vendor/github.com/hashicorp/terraform/flatmap/expand.go new file mode 100644 index 00000000000..b9d15461e0a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/flatmap/expand.go @@ -0,0 +1,152 @@ +package flatmap + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/terraform/configs/hcl2shim" +) + +// Expand takes a map and a key (prefix) and expands that value into +// a more complex structure. This is the reverse of the Flatten operation. +func Expand(m map[string]string, key string) interface{} { + // If the key is exactly a key in the map, just return it + if v, ok := m[key]; ok { + if v == "true" { + return true + } else if v == "false" { + return false + } + + return v + } + + // Check if the key is an array, and if so, expand the array + if v, ok := m[key+".#"]; ok { + // If the count of the key is unknown, then just put the unknown + // value in the value itself. This will be detected by Terraform + // core later. + if v == hcl2shim.UnknownVariableValue { + return v + } + + return expandArray(m, key) + } + + // Check if this is a prefix in the map + prefix := key + "." + for k := range m { + if strings.HasPrefix(k, prefix) { + return expandMap(m, prefix) + } + } + + return nil +} + +func expandArray(m map[string]string, prefix string) []interface{} { + num, err := strconv.ParseInt(m[prefix+".#"], 0, 0) + if err != nil { + panic(err) + } + + // If the number of elements in this array is 0, then return an + // empty slice as there is nothing to expand. Trying to expand it + // anyway could lead to crashes as any child maps, arrays or sets + // that no longer exist are still shown as empty with a count of 0. + if num == 0 { + return []interface{}{} + } + + // NOTE: "num" is not necessarily accurate, e.g. if a user tampers + // with state, so the following code should not crash when given a + // number of items more or less than what's given in num. The + // num key is mainly just a hint that this is a list or set. 
+ + // The Schema "Set" type stores its values in an array format, but + // using numeric hash values instead of ordinal keys. Take the set + // of keys regardless of value, and expand them in numeric order. + // See GH-11042 for more details. + keySet := map[int]bool{} + computed := map[string]bool{} + for k := range m { + if !strings.HasPrefix(k, prefix+".") { + continue + } + + key := k[len(prefix)+1:] + idx := strings.Index(key, ".") + if idx != -1 { + key = key[:idx] + } + + // skip the count value + if key == "#" { + continue + } + + // strip the computed flag if there is one + if strings.HasPrefix(key, "~") { + key = key[1:] + computed[key] = true + } + + k, err := strconv.Atoi(key) + if err != nil { + panic(err) + } + keySet[int(k)] = true + } + + keysList := make([]int, 0, num) + for key := range keySet { + keysList = append(keysList, key) + } + sort.Ints(keysList) + + result := make([]interface{}, len(keysList)) + for i, key := range keysList { + keyString := strconv.Itoa(key) + if computed[keyString] { + keyString = "~" + keyString + } + result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString)) + } + + return result +} + +func expandMap(m map[string]string, prefix string) map[string]interface{} { + // Submaps may not have a '%' key, so we can't count on this value being + // here. If we don't have a count, just proceed as if we have have a map. + if count, ok := m[prefix+"%"]; ok && count == "0" { + return map[string]interface{}{} + } + + result := make(map[string]interface{}) + for k := range m { + if !strings.HasPrefix(k, prefix) { + continue + } + + key := k[len(prefix):] + idx := strings.Index(key, ".") + if idx != -1 { + key = key[:idx] + } + if _, ok := result[key]; ok { + continue + } + + // skip the map count value + if key == "%" { + continue + } + + result[key] = Expand(m, k[:len(prefix)+len(key)]) + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go new file mode 100644 index 00000000000..9ff6e426526 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go @@ -0,0 +1,71 @@ +package flatmap + +import ( + "fmt" + "reflect" +) + +// Flatten takes a structure and turns into a flat map[string]string. +// +// Within the "thing" parameter, only primitive values are allowed. Structs are +// not supported. Therefore, it can only be slices, maps, primitives, and +// any combination of those together. +// +// See the tests for examples of what inputs are turned into. 
+func Flatten(thing map[string]interface{}) Map { + result := make(map[string]string) + + for k, raw := range thing { + flatten(result, k, reflect.ValueOf(raw)) + } + + return Map(result) +} + +func flatten(result map[string]string, prefix string, v reflect.Value) { + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Bool: + if v.Bool() { + result[prefix] = "true" + } else { + result[prefix] = "false" + } + case reflect.Int: + result[prefix] = fmt.Sprintf("%d", v.Int()) + case reflect.Map: + flattenMap(result, prefix, v) + case reflect.Slice: + flattenSlice(result, prefix, v) + case reflect.String: + result[prefix] = v.String() + default: + panic(fmt.Sprintf("Unknown: %s", v)) + } +} + +func flattenMap(result map[string]string, prefix string, v reflect.Value) { + for _, k := range v.MapKeys() { + if k.Kind() == reflect.Interface { + k = k.Elem() + } + + if k.Kind() != reflect.String { + panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) + } + + flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) + } +} + +func flattenSlice(result map[string]string, prefix string, v reflect.Value) { + prefix = prefix + "." + + result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) + for i := 0; i < v.Len(); i++ { + flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) + } +} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/map.go b/vendor/github.com/hashicorp/terraform/flatmap/map.go new file mode 100644 index 00000000000..46b72c4014a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/flatmap/map.go @@ -0,0 +1,82 @@ +package flatmap + +import ( + "strings" +) + +// Map is a wrapper around map[string]string that provides some helpers +// above it that assume the map is in the format that flatmap expects +// (the result of Flatten). +// +// All modifying functions such as Delete are done in-place unless +// otherwise noted. +type Map map[string]string + +// Contains returns true if the map contains the given key. +func (m Map) Contains(key string) bool { + for _, k := range m.Keys() { + if k == key { + return true + } + } + + return false +} + +// Delete deletes a key out of the map with the given prefix. +func (m Map) Delete(prefix string) { + for k, _ := range m { + match := k == prefix + if !match { + if !strings.HasPrefix(k, prefix) { + continue + } + + if k[len(prefix):len(prefix)+1] != "." { + continue + } + } + + delete(m, k) + } +} + +// Keys returns all of the top-level keys in this map +func (m Map) Keys() []string { + ks := make(map[string]struct{}) + for k, _ := range m { + idx := strings.Index(k, ".") + if idx == -1 { + idx = len(k) + } + + ks[k[:idx]] = struct{}{} + } + + result := make([]string, 0, len(ks)) + for k, _ := range ks { + result = append(result, k) + } + + return result +} + +// Merge merges the contents of the other Map into this one. +// +// This merge is smarter than a simple map iteration because it +// will fully replace arrays and other complex structures that +// are present in this map with the other map's. For example, if +// this map has a 3 element "foo" list, and m2 has a 2 element "foo" +// list, then the result will be that m has a 2 element "foo" +// list. 
+func (m Map) Merge(m2 Map) { + for _, prefix := range m2.Keys() { + m.Delete(prefix) + + for k, v := range m2 { + if strings.HasPrefix(k, prefix) { + m[k] = v + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/config/decode.go b/vendor/github.com/hashicorp/terraform/helper/config/decode.go new file mode 100644 index 00000000000..f470c9b4bee --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/config/decode.go @@ -0,0 +1,28 @@ +package config + +import ( + "github.com/mitchellh/mapstructure" +) + +func Decode(target interface{}, raws ...interface{}) (*mapstructure.Metadata, error) { + var md mapstructure.Metadata + decoderConfig := &mapstructure.DecoderConfig{ + Metadata: &md, + Result: target, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decoderConfig) + if err != nil { + return nil, err + } + + for _, raw := range raws { + err := decoder.Decode(raw) + if err != nil { + return nil, err + } + } + + return &md, nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/config/validator.go b/vendor/github.com/hashicorp/terraform/helper/config/validator.go new file mode 100644 index 00000000000..1a6e023b606 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/config/validator.go @@ -0,0 +1,214 @@ +package config + +import ( + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/terraform/flatmap" + "github.com/hashicorp/terraform/terraform" +) + +// Validator is a helper that helps you validate the configuration +// of your resource, resource provider, etc. +// +// At the most basic level, set the Required and Optional lists to be +// specifiers of keys that are required or optional. If a key shows up +// that isn't in one of these two lists, then an error is generated. +// +// The "specifiers" allowed in this is a fairly rich syntax to help +// describe the format of your configuration: +// +// * Basic keys are just strings. For example: "foo" will match the +// "foo" key. +// +// * Nested structure keys can be matched by doing +// "listener.*.foo". This will verify that there is at least one +// listener element that has the "foo" key set. +// +// * The existence of a nested structure can be checked by simply +// doing "listener.*" which will verify that there is at least +// one element in the "listener" structure. This is NOT +// validating that "listener" is an array. It is validating +// that it is a nested structure in the configuration. +// +type Validator struct { + Required []string + Optional []string +} + +func (v *Validator) Validate( + c *terraform.ResourceConfig) (ws []string, es []error) { + // Flatten the configuration so it is easier to reason about + flat := flatmap.Flatten(c.Raw) + + keySet := make(map[string]validatorKey) + for i, vs := range [][]string{v.Required, v.Optional} { + req := i == 0 + for _, k := range vs { + vk, err := newValidatorKey(k, req) + if err != nil { + es = append(es, err) + continue + } + + keySet[k] = vk + } + } + + purged := make([]string, 0) + for _, kv := range keySet { + p, w, e := kv.Validate(flat) + if len(w) > 0 { + ws = append(ws, w...) + } + if len(e) > 0 { + es = append(es, e...) + } + + purged = append(purged, p...) + } + + // Delete all the keys we processed in order to find + // the unknown keys. 
+ for _, p := range purged { + delete(flat, p) + } + + // The rest are unknown + for k, _ := range flat { + es = append(es, fmt.Errorf("Unknown configuration: %s", k)) + } + + return +} + +type validatorKey interface { + // Validate validates the given configuration and returns viewed keys, + // warnings, and errors. + Validate(map[string]string) ([]string, []string, []error) +} + +func newValidatorKey(k string, req bool) (validatorKey, error) { + var result validatorKey + + parts := strings.Split(k, ".") + if len(parts) > 1 && parts[1] == "*" { + result = &nestedValidatorKey{ + Parts: parts, + Required: req, + } + } else { + result = &basicValidatorKey{ + Key: k, + Required: req, + } + } + + return result, nil +} + +// basicValidatorKey validates keys that are basic such as "foo" +type basicValidatorKey struct { + Key string + Required bool +} + +func (v *basicValidatorKey) Validate( + m map[string]string) ([]string, []string, []error) { + for k, _ := range m { + // If we have the exact key its a match + if k == v.Key { + return []string{k}, nil, nil + } + } + + if !v.Required { + return nil, nil, nil + } + + return nil, nil, []error{fmt.Errorf( + "Key not found: %s", v.Key)} +} + +type nestedValidatorKey struct { + Parts []string + Required bool +} + +func (v *nestedValidatorKey) validate( + m map[string]string, + prefix string, + offset int) ([]string, []string, []error) { + if offset >= len(v.Parts) { + // We're at the end. Look for a specific key. + v2 := &basicValidatorKey{Key: prefix, Required: v.Required} + return v2.Validate(m) + } + + current := v.Parts[offset] + + // If we're at offset 0, special case to start at the next one. + if offset == 0 { + return v.validate(m, current, offset+1) + } + + // Determine if we're doing a "for all" or a specific key + if current != "*" { + // We're looking at a specific key, continue on. + return v.validate(m, prefix+"."+current, offset+1) + } + + // We're doing a "for all", so we loop over. + countStr, ok := m[prefix+".#"] + if !ok { + if !v.Required { + // It wasn't required, so its no problem. + return nil, nil, nil + } + + return nil, nil, []error{fmt.Errorf( + "Key not found: %s", prefix)} + } + + count, err := strconv.ParseInt(countStr, 0, 0) + if err != nil { + // This shouldn't happen if flatmap works properly + panic("invalid flatmap array") + } + + var e []error + var w []string + u := make([]string, 1, count+1) + u[0] = prefix + ".#" + for i := 0; i < int(count); i++ { + prefix := fmt.Sprintf("%s.%d", prefix, i) + + // Mark that we saw this specific key + u = append(u, prefix) + + // Mark all prefixes of this + for k, _ := range m { + if !strings.HasPrefix(k, prefix+".") { + continue + } + u = append(u, k) + } + + // If we have more parts, then validate deeper + if offset+1 < len(v.Parts) { + u2, w2, e2 := v.validate(m, prefix, offset+1) + + u = append(u, u2...) + w = append(w, w2...) + e = append(e, e2...) 
+ } + } + + return u, w, e +} + +func (v *nestedValidatorKey) Validate( + m map[string]string) ([]string, []string, []error) { + return v.validate(m, "", 0) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/error.go b/vendor/github.com/hashicorp/terraform/helper/resource/error.go new file mode 100644 index 00000000000..7ee21614b9f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/error.go @@ -0,0 +1,79 @@ +package resource + +import ( + "fmt" + "strings" + "time" +) + +type NotFoundError struct { + LastError error + LastRequest interface{} + LastResponse interface{} + Message string + Retries int +} + +func (e *NotFoundError) Error() string { + if e.Message != "" { + return e.Message + } + + if e.Retries > 0 { + return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries) + } + + return "couldn't find resource" +} + +// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending +type UnexpectedStateError struct { + LastError error + State string + ExpectedState []string +} + +func (e *UnexpectedStateError) Error() string { + return fmt.Sprintf( + "unexpected state '%s', wanted target '%s'. last error: %s", + e.State, + strings.Join(e.ExpectedState, ", "), + e.LastError, + ) +} + +// TimeoutError is returned when WaitForState times out +type TimeoutError struct { + LastError error + LastState string + Timeout time.Duration + ExpectedState []string +} + +func (e *TimeoutError) Error() string { + expectedState := "resource to be gone" + if len(e.ExpectedState) > 0 { + expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", ")) + } + + extraInfo := make([]string, 0) + if e.LastState != "" { + extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState)) + } + if e.Timeout > 0 { + extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String())) + } + + suffix := "" + if len(extraInfo) > 0 { + suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", ")) + } + + if e.LastError != nil { + return fmt.Sprintf("timeout while waiting for %s%s: %s", + expectedState, suffix, e.LastError) + } + + return fmt.Sprintf("timeout while waiting for %s%s", + expectedState, suffix) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go b/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go new file mode 100644 index 00000000000..f8dcd124af0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go @@ -0,0 +1,43 @@ +package resource + +import ( + "context" + "net" + "time" + + proto "github.com/hashicorp/terraform-plugin-sdk/tfplugin5" + "github.com/hashicorp/terraform/helper/plugin" + tfplugin "github.com/hashicorp/terraform/plugin" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" +) + +// GRPCTestProvider takes a legacy ResourceProvider, wraps it in the new GRPC +// shim and starts it in a grpc server using an inmem connection. It returns a +// GRPCClient for this new server to test the shimmed resource provider. 
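Looking back at the validator.go file added above, a rough sketch (not part of the vendored files) of the Required/Optional specifier syntax; the key names are invented, and constructing terraform.ResourceConfig directly from a raw map is a shortcut for illustration only, since Validate only reads the Raw field.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/config"
	"github.com/hashicorp/terraform/terraform"
)

func main() {
	v := &config.Validator{
		// "listener.*.port" requires at least one listener block with a port.
		Required: []string{"name", "listener.*.port"},
		Optional: []string{"tags.*"},
	}

	raw := &terraform.ResourceConfig{
		Raw: map[string]interface{}{
			"name": "web",
			"listener": []interface{}{
				map[string]interface{}{"port": 80},
			},
		},
	}

	warns, errs := v.Validate(raw)
	fmt.Println(warns, errs) // [] [] for a valid config
}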
+func GRPCTestProvider(rp terraform.ResourceProvider) providers.Interface { + listener := bufconn.Listen(256 * 1024) + grpcServer := grpc.NewServer() + + p := plugin.NewGRPCProviderServerShim(rp) + proto.RegisterProviderServer(grpcServer, p) + + go grpcServer.Serve(listener) + + conn, err := grpc.Dial("", grpc.WithDialer(func(string, time.Duration) (net.Conn, error) { + return listener.Dial() + }), grpc.WithInsecure()) + if err != nil { + panic(err) + } + + var pp tfplugin.GRPCProviderPlugin + client, _ := pp.GRPCClient(context.Background(), nil, conn) + + grpcClient := client.(*tfplugin.GRPCProvider) + grpcClient.TestServer = grpcServer + + return grpcClient +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/id.go b/vendor/github.com/hashicorp/terraform/helper/resource/id.go new file mode 100644 index 00000000000..44949550e73 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/id.go @@ -0,0 +1,45 @@ +package resource + +import ( + "fmt" + "strings" + "sync" + "time" +) + +const UniqueIdPrefix = `terraform-` + +// idCounter is a monotonic counter for generating ordered unique ids. +var idMutex sync.Mutex +var idCounter uint32 + +// Helper for a resource to generate a unique identifier w/ default prefix +func UniqueId() string { + return PrefixedUniqueId(UniqueIdPrefix) +} + +// UniqueIDSuffixLength is the string length of the suffix generated by +// PrefixedUniqueId. This can be used by length validation functions to +// ensure prefixes are the correct length for the target field. +const UniqueIDSuffixLength = 26 + +// Helper for a resource to generate a unique identifier w/ given prefix +// +// After the prefix, the ID consists of an incrementing 26 digit value (to match +// previous timestamp output). After the prefix, the ID consists of a timestamp +// and an incrementing 8 hex digit value The timestamp means that multiple IDs +// created with the same prefix will sort in the order of their creation, even +// across multiple terraform executions, as long as the clock is not turned back +// between calls, and as long as any given terraform execution generates fewer +// than 4 billion IDs. +func PrefixedUniqueId(prefix string) string { + // Be precise to 4 digits of fractional seconds, but remove the dot before the + // fractional seconds. + timestamp := strings.Replace( + time.Now().UTC().Format("20060102150405.0000"), ".", "", 1) + + idMutex.Lock() + defer idMutex.Unlock() + idCounter++ + return fmt.Sprintf("%s%s%08x", prefix, timestamp, idCounter) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/map.go b/vendor/github.com/hashicorp/terraform/helper/resource/map.go new file mode 100644 index 00000000000..a465136f778 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/map.go @@ -0,0 +1,140 @@ +package resource + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform/terraform" +) + +// Map is a map of resources that are supported, and provides helpers for +// more easily implementing a ResourceProvider. 
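Before the Map helper below, a quick sketch (not part of the vendored files) of the ID helpers in id.go above; the prefix is illustrative.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/resource"
)

func main() {
	// IDs share a timestamp-plus-counter suffix, so successive calls with the
	// same prefix sort in creation order.
	a := resource.PrefixedUniqueId("web-")
	b := resource.PrefixedUniqueId("web-")
	fmt.Println(a < b) // true

	// UniqueId is the same helper with the default "terraform-" prefix.
	fmt.Println(resource.UniqueId())
}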
+type Map struct { + Mapping map[string]Resource +} + +func (m *Map) Validate( + t string, c *terraform.ResourceConfig) ([]string, []error) { + r, ok := m.Mapping[t] + if !ok { + return nil, []error{fmt.Errorf("Unknown resource type: %s", t)} + } + + // If there is no validator set, then it is valid + if r.ConfigValidator == nil { + return nil, nil + } + + return r.ConfigValidator.Validate(c) +} + +// Apply performs a create or update depending on the diff, and calls +// the proper function on the matching Resource. +func (m *Map) Apply( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + d *terraform.InstanceDiff, + meta interface{}) (*terraform.InstanceState, error) { + r, ok := m.Mapping[info.Type] + if !ok { + return nil, fmt.Errorf("Unknown resource type: %s", info.Type) + } + + if d.Destroy || d.RequiresNew() { + if s.ID != "" { + // Destroy the resource if it is created + err := r.Destroy(s, meta) + if err != nil { + return s, err + } + + s.ID = "" + } + + // If we're only destroying, and not creating, then return now. + // Otherwise, we continue so that we can create a new resource. + if !d.RequiresNew() { + return nil, nil + } + } + + var result *terraform.InstanceState + var err error + if s.ID == "" { + result, err = r.Create(s, d, meta) + } else { + if r.Update == nil { + return s, fmt.Errorf( + "Resource type '%s' doesn't support update", + info.Type) + } + + result, err = r.Update(s, d, meta) + } + if result != nil { + if result.Attributes == nil { + result.Attributes = make(map[string]string) + } + + result.Attributes["id"] = result.ID + } + + return result, err +} + +// Diff performs a diff on the proper resource type. +func (m *Map) Diff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + r, ok := m.Mapping[info.Type] + if !ok { + return nil, fmt.Errorf("Unknown resource type: %s", info.Type) + } + + return r.Diff(s, c, meta) +} + +// Refresh performs a Refresh on the proper resource type. +// +// Refresh on the Resource won't be called if the state represents a +// non-created resource (ID is blank). +// +// An error is returned if the resource isn't registered. +func (m *Map) Refresh( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + // If the resource isn't created, don't refresh. + if s.ID == "" { + return s, nil + } + + r, ok := m.Mapping[info.Type] + if !ok { + return nil, fmt.Errorf("Unknown resource type: %s", info.Type) + } + + return r.Refresh(s, meta) +} + +// Resources returns all the resources that are supported by this +// resource map and can be used to satisfy the Resources method of +// a ResourceProvider. 
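A rough sketch (not part of the vendored files) of registering a single resource type with the Map helper above; it leans on the Resource struct defined in resource.go further below, and the type name and behavior are invented.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// exampleCreate pretends to create an object and records its new ID.
func exampleCreate(s *terraform.InstanceState, d *terraform.InstanceDiff, meta interface{}) (*terraform.InstanceState, error) {
	s.ID = "thing-123"
	return s, nil
}

// exampleDestroy pretends to delete the object identified by s.ID.
func exampleDestroy(s *terraform.InstanceState, meta interface{}) error {
	fmt.Println("destroying", s.ID)
	return nil
}

func main() {
	m := &resource.Map{
		Mapping: map[string]resource.Resource{
			"example_thing": {
				Create:  exampleCreate,
				Destroy: exampleDestroy,
			},
		},
	}

	// Resources() enumerates the registered types; a ResourceProvider built
	// on Map can return this slice directly.
	for _, rt := range m.Resources() {
		fmt.Println(rt.Name) // example_thing
	}
}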
+func (m *Map) Resources() []terraform.ResourceType { + ks := make([]string, 0, len(m.Mapping)) + for k, _ := range m.Mapping { + ks = append(ks, k) + } + sort.Strings(ks) + + rs := make([]terraform.ResourceType, 0, len(m.Mapping)) + for _, k := range ks { + rs = append(rs, terraform.ResourceType{ + Name: k, + }) + } + + return rs +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/resource.go b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go new file mode 100644 index 00000000000..0d9c831a651 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go @@ -0,0 +1,49 @@ +package resource + +import ( + "github.com/hashicorp/terraform/helper/config" + "github.com/hashicorp/terraform/terraform" +) + +type Resource struct { + ConfigValidator *config.Validator + Create CreateFunc + Destroy DestroyFunc + Diff DiffFunc + Refresh RefreshFunc + Update UpdateFunc +} + +// CreateFunc is a function that creates a resource that didn't previously +// exist. +type CreateFunc func( + *terraform.InstanceState, + *terraform.InstanceDiff, + interface{}) (*terraform.InstanceState, error) + +// DestroyFunc is a function that destroys a resource that previously +// exists using the state. +type DestroyFunc func( + *terraform.InstanceState, + interface{}) error + +// DiffFunc is a function that performs a diff of a resource. +type DiffFunc func( + *terraform.InstanceState, + *terraform.ResourceConfig, + interface{}) (*terraform.InstanceDiff, error) + +// RefreshFunc is a function that performs a refresh of a specific type +// of resource. +type RefreshFunc func( + *terraform.InstanceState, + interface{}) (*terraform.InstanceState, error) + +// UpdateFunc is a function that is called to update a resource that +// previously existed. The difference between this and CreateFunc is that +// the diff is guaranteed to only contain attributes that don't require +// a new resource. +type UpdateFunc func( + *terraform.InstanceState, + *terraform.InstanceDiff, + interface{}) (*terraform.InstanceState, error) diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go new file mode 100644 index 00000000000..88a839664c1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/state.go @@ -0,0 +1,259 @@ +package resource + +import ( + "log" + "time" +) + +var refreshGracePeriod = 30 * time.Second + +// StateRefreshFunc is a function type used for StateChangeConf that is +// responsible for refreshing the item being watched for a state change. +// +// It returns three results. `result` is any object that will be returned +// as the final object after waiting for state change. This allows you to +// return the final updated object, for example an EC2 instance after refreshing +// it. +// +// `state` is the latest state of that object. And `err` is any error that +// may have happened while refreshing the state. +type StateRefreshFunc func() (result interface{}, state string, err error) + +// StateChangeConf is the configuration struct used for `WaitForState`. 
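A short sketch (not part of the vendored files) of driving the StateChangeConf/WaitForState machinery defined below; the refresh function fakes a pending-then-running lifecycle, and the state names are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

func main() {
	calls := 0

	conf := &resource.StateChangeConf{
		Pending: []string{"creating"},
		Target:  []string{"running"},
		Timeout: 30 * time.Second,
		// Refresh is polled until it reports a Target state, returns an
		// error, or the Timeout elapses.
		Refresh: func() (interface{}, string, error) {
			calls++
			if calls < 3 {
				return "instance-1", "creating", nil
			}
			return "instance-1", "running", nil
		},
	}

	result, err := conf.WaitForState()
	fmt.Println(result, err) // instance-1 <nil>
}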
+type StateChangeConf struct { + Delay time.Duration // Wait this time before starting checks + Pending []string // States that are "allowed" and will continue trying + Refresh StateRefreshFunc // Refreshes the current state + Target []string // Target state + Timeout time.Duration // The amount of time to wait before timeout + MinTimeout time.Duration // Smallest time to wait before refreshes + PollInterval time.Duration // Override MinTimeout/backoff and only poll this often + NotFoundChecks int // Number of times to allow not found + + // This is to work around inconsistent APIs + ContinuousTargetOccurence int // Number of times the Target state has to occur continuously +} + +// WaitForState watches an object and waits for it to achieve the state +// specified in the configuration using the specified Refresh() func, +// waiting the number of seconds specified in the timeout configuration. +// +// If the Refresh function returns an error, exit immediately with that error. +// +// If the Refresh function returns a state other than the Target state or one +// listed in Pending, return immediately with an error. +// +// If the Timeout is exceeded before reaching the Target state, return an +// error. +// +// Otherwise, the result is the result of the first call to the Refresh function to +// reach the target state. +func (conf *StateChangeConf) WaitForState() (interface{}, error) { + log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) + + notfoundTick := 0 + targetOccurence := 0 + + // Set a default for times to check for not found + if conf.NotFoundChecks == 0 { + conf.NotFoundChecks = 20 + } + + if conf.ContinuousTargetOccurence == 0 { + conf.ContinuousTargetOccurence = 1 + } + + type Result struct { + Result interface{} + State string + Error error + Done bool + } + + // Read every result from the refresh loop, waiting for a positive result.Done. + resCh := make(chan Result, 1) + // cancellation channel for the refresh loop + cancelCh := make(chan struct{}) + + result := Result{} + + go func() { + defer close(resCh) + + time.Sleep(conf.Delay) + + // start with 0 delay for the first loop + var wait time.Duration + + for { + // store the last result + resCh <- result + + // wait and watch for cancellation + select { + case <-cancelCh: + return + case <-time.After(wait): + // first round had no wait + if wait == 0 { + wait = 100 * time.Millisecond + } + } + + res, currentState, err := conf.Refresh() + result = Result{ + Result: res, + State: currentState, + Error: err, + } + + if err != nil { + resCh <- result + return + } + + // If we're waiting for the absence of a thing, then return + if res == nil && len(conf.Target) == 0 { + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + + if res == nil { + // If we didn't find the resource, check if we have been + // not finding it for awhile, and if so, report an error. 
+ notfoundTick++ + if notfoundTick > conf.NotFoundChecks { + result.Error = &NotFoundError{ + LastError: err, + Retries: notfoundTick, + } + resCh <- result + return + } + } else { + // Reset the counter for when a resource isn't found + notfoundTick = 0 + found := false + + for _, allowed := range conf.Target { + if currentState == allowed { + found = true + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + } + + for _, allowed := range conf.Pending { + if currentState == allowed { + found = true + targetOccurence = 0 + break + } + } + + if !found && len(conf.Pending) > 0 { + result.Error = &UnexpectedStateError{ + LastError: err, + State: result.State, + ExpectedState: conf.Target, + } + resCh <- result + return + } + } + + // Wait between refreshes using exponential backoff, except when + // waiting for the target state to reoccur. + if targetOccurence == 0 { + wait *= 2 + } + + // If a poll interval has been specified, choose that interval. + // Otherwise bound the default value. + if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { + wait = conf.PollInterval + } else { + if wait < conf.MinTimeout { + wait = conf.MinTimeout + } else if wait > 10*time.Second { + wait = 10 * time.Second + } + } + + log.Printf("[TRACE] Waiting %s before next try", wait) + } + }() + + // store the last value result from the refresh loop + lastResult := Result{} + + timeout := time.After(conf.Timeout) + for { + select { + case r, ok := <-resCh: + // channel closed, so return the last result + if !ok { + return lastResult.Result, lastResult.Error + } + + // we reached the intended state + if r.Done { + return r.Result, r.Error + } + + // still waiting, store the last result + lastResult = r + + case <-timeout: + log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) + log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) + + // cancel the goroutine and start our grace period timer + close(cancelCh) + timeout := time.After(refreshGracePeriod) + + // we need a for loop and a label to break on, because we may have + // an extra response value to read, but still want to wait for the + // channel to close. 
+ forSelect: + for { + select { + case r, ok := <-resCh: + if r.Done { + // the last refresh loop reached the desired state + return r.Result, r.Error + } + + if !ok { + // the goroutine returned + break forSelect + } + + // target state not reached, save the result for the + // TimeoutError and wait for the channel to close + lastResult = r + case <-timeout: + log.Println("[ERROR] WaitForState exceeded refresh grace period") + break forSelect + } + } + + return nil, &TimeoutError{ + LastError: lastResult.Error, + LastState: lastResult.State, + Timeout: conf.Timeout, + ExpectedState: conf.Target, + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go new file mode 100644 index 00000000000..257109d3b6c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go @@ -0,0 +1,188 @@ +package resource + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform/addrs" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" +) + +// shimState takes a new *states.State and reverts it to a legacy state for the provider ACC tests +func shimNewState(newState *states.State, providers map[string]terraform.ResourceProvider) (*terraform.State, error) { + state := terraform.NewState() + + // in the odd case of a nil state, let the helper packages handle it + if newState == nil { + return nil, nil + } + + for _, newMod := range newState.Modules { + mod := state.AddModule(newMod.Addr) + + for name, out := range newMod.OutputValues { + outputType := "" + val := hcl2shim.ConfigValueFromHCL2(out.Value) + ty := out.Value.Type() + switch { + case ty == cty.String: + outputType = "string" + case ty.IsTupleType() || ty.IsListType(): + outputType = "list" + case ty.IsMapType(): + outputType = "map" + } + + mod.Outputs[name] = &terraform.OutputState{ + Type: outputType, + Value: val, + Sensitive: out.Sensitive, + } + } + + for _, res := range newMod.Resources { + resType := res.Addr.Type + providerType := res.ProviderConfig.ProviderConfig.Type + + resource := getResource(providers, providerType.LegacyString(), res.Addr) + + for key, i := range res.Instances { + resState := &terraform.ResourceState{ + Type: resType, + Provider: res.ProviderConfig.String(), + } + + // We should always have a Current instance here, but be safe about checking. 
+ if i.Current != nil { + flatmap, err := shimmedAttributes(i.Current, resource) + if err != nil { + return nil, fmt.Errorf("error decoding state for %q: %s", resType, err) + } + + var meta map[string]interface{} + if i.Current.Private != nil { + err := json.Unmarshal(i.Current.Private, &meta) + if err != nil { + return nil, err + } + } + + resState.Primary = &terraform.InstanceState{ + ID: flatmap["id"], + Attributes: flatmap, + Tainted: i.Current.Status == states.ObjectTainted, + Meta: meta, + } + + if i.Current.SchemaVersion != 0 { + if resState.Primary.Meta == nil { + resState.Primary.Meta = map[string]interface{}{} + } + resState.Primary.Meta["schema_version"] = i.Current.SchemaVersion + } + + for _, dep := range i.Current.DependsOn { + resState.Dependencies = append(resState.Dependencies, dep.String()) + } + + // convert the indexes to the old style flapmap indexes + idx := "" + switch key.(type) { + case addrs.IntKey: + // don't add numeric index values to resources with a count of 0 + if len(res.Instances) > 1 { + idx = fmt.Sprintf(".%d", key) + } + case addrs.StringKey: + idx = "." + key.String() + } + + mod.Resources[res.Addr.String()+idx] = resState + } + + // add any deposed instances + for _, dep := range i.Deposed { + flatmap, err := shimmedAttributes(dep, resource) + if err != nil { + return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err) + } + + var meta map[string]interface{} + if dep.Private != nil { + err := json.Unmarshal(dep.Private, &meta) + if err != nil { + return nil, err + } + } + + deposed := &terraform.InstanceState{ + ID: flatmap["id"], + Attributes: flatmap, + Tainted: dep.Status == states.ObjectTainted, + Meta: meta, + } + if dep.SchemaVersion != 0 { + deposed.Meta = map[string]interface{}{ + "schema_version": dep.SchemaVersion, + } + } + + resState.Deposed = append(resState.Deposed, deposed) + } + } + } + } + + return state, nil +} + +func getResource(providers map[string]terraform.ResourceProvider, providerName string, addr addrs.Resource) *schema.Resource { + p := providers[providerName] + if p == nil { + panic(fmt.Sprintf("provider %q not found in test step", providerName)) + } + + // this is only for tests, so should only see schema.Providers + provider := p.(*schema.Provider) + + switch addr.Mode { + case addrs.ManagedResourceMode: + resource := provider.ResourcesMap[addr.Type] + if resource != nil { + return resource + } + case addrs.DataResourceMode: + resource := provider.DataSourcesMap[addr.Type] + if resource != nil { + return resource + } + } + + panic(fmt.Sprintf("resource %s not found in test step", addr.Type)) +} + +func shimmedAttributes(instance *states.ResourceInstanceObjectSrc, res *schema.Resource) (map[string]string, error) { + flatmap := instance.AttrsFlat + if flatmap != nil { + return flatmap, nil + } + + // if we have json attrs, they need to be decoded + rio, err := instance.Decode(res.CoreConfigSchema().ImpliedType()) + if err != nil { + return nil, err + } + + instanceState, err := res.ShimInstanceStateFromValue(rio.Value) + if err != nil { + return nil, err + } + + return instanceState.Attributes, nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go new file mode 100644 index 00000000000..3153d8447d4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go @@ -0,0 +1,1320 @@ +package resource + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "os" + 
"path/filepath" + "reflect" + "regexp" + "strings" + "syscall" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + "github.com/mitchellh/colorstring" + + "github.com/hashicorp/terraform-plugin-sdk/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/helper/logging" + "github.com/hashicorp/terraform/internal/initwd" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" +) + +// flagSweep is a flag available when running tests on the command line. It +// contains a comma seperated list of regions to for the sweeper functions to +// run in. This flag bypasses the normal Test path and instead runs functions designed to +// clean up any leaked resources a testing environment could have created. It is +// a best effort attempt, and relies on Provider authors to implement "Sweeper" +// methods for resources. + +// Adding Sweeper methods with AddTestSweepers will +// construct a list of sweeper funcs to be called here. We iterate through +// regions provided by the sweep flag, and for each region we iterate through the +// tests, and exit on any errors. At time of writing, sweepers are ran +// sequentially, however they can list dependencies to be ran first. We track +// the sweepers that have been ran, so as to not run a sweeper twice for a given +// region. +// +// WARNING: +// Sweepers are designed to be destructive. You should not use the -sweep flag +// in any environment that is not strictly a test environment. Resources will be +// destroyed. + +var flagSweep = flag.String("sweep", "", "List of Regions to run available Sweepers") +var flagSweepRun = flag.String("sweep-run", "", "Comma seperated list of Sweeper Tests to run") +var sweeperFuncs map[string]*Sweeper + +// map of sweepers that have ran, and the success/fail status based on any error +// raised +var sweeperRunList map[string]bool + +// type SweeperFunc is a signature for a function that acts as a sweeper. It +// accepts a string for the region that the sweeper is to be ran in. This +// function must be able to construct a valid client for that region. +type SweeperFunc func(r string) error + +type Sweeper struct { + // Name for sweeper. Must be unique to be ran by the Sweeper Runner + Name string + + // Dependencies list the const names of other Sweeper functions that must be ran + // prior to running this Sweeper. This is an ordered list that will be invoked + // recursively at the helper/resource level + Dependencies []string + + // Sweeper function that when invoked sweeps the Provider of specific + // resources + F SweeperFunc +} + +func init() { + sweeperFuncs = make(map[string]*Sweeper) +} + +// AddTestSweepers function adds a given name and Sweeper configuration +// pair to the internal sweeperFuncs map. Invoke this function to register a +// resource sweeper to be available for running when the -sweep flag is used +// with `go test`. Sweeper names must be unique to help ensure a given sweeper +// is only ran once per run. 
+func AddTestSweepers(name string, s *Sweeper) { + if _, ok := sweeperFuncs[name]; ok { + log.Fatalf("[ERR] Error adding (%s) to sweeperFuncs: function already exists in map", name) + } + + sweeperFuncs[name] = s +} + +func TestMain(m *testing.M) { + flag.Parse() + if *flagSweep != "" { + // parse flagSweep contents for regions to run + regions := strings.Split(*flagSweep, ",") + + // get filtered list of sweepers to run based on sweep-run flag + sweepers := filterSweepers(*flagSweepRun, sweeperFuncs) + for _, region := range regions { + region = strings.TrimSpace(region) + // reset sweeperRunList for each region + sweeperRunList = map[string]bool{} + + log.Printf("[DEBUG] Running Sweepers for region (%s):\n", region) + for _, sweeper := range sweepers { + if err := runSweeperWithRegion(region, sweeper); err != nil { + log.Fatalf("[ERR] error running (%s): %s", sweeper.Name, err) + } + } + + log.Printf("Sweeper Tests ran:\n") + for s, _ := range sweeperRunList { + fmt.Printf("\t- %s\n", s) + } + } + } else { + os.Exit(m.Run()) + } +} + +// filterSweepers takes a comma seperated string listing the names of sweepers +// to be ran, and returns a filtered set from the list of all of sweepers to +// run based on the names given. +func filterSweepers(f string, source map[string]*Sweeper) map[string]*Sweeper { + filterSlice := strings.Split(strings.ToLower(f), ",") + if len(filterSlice) == 1 && filterSlice[0] == "" { + // if the filter slice is a single element of "" then no sweeper list was + // given, so just return the full list + return source + } + + sweepers := make(map[string]*Sweeper) + for name, sweeper := range source { + for _, s := range filterSlice { + if strings.Contains(strings.ToLower(name), s) { + sweepers[name] = sweeper + } + } + } + return sweepers +} + +// runSweeperWithRegion recieves a sweeper and a region, and recursively calls +// itself with that region for every dependency found for that sweeper. If there +// are no dependencies, invoke the contained sweeper fun with the region, and +// add the success/fail status to the sweeperRunList. +func runSweeperWithRegion(region string, s *Sweeper) error { + for _, dep := range s.Dependencies { + if depSweeper, ok := sweeperFuncs[dep]; ok { + log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running..", s.Name, dep) + if err := runSweeperWithRegion(region, depSweeper); err != nil { + return err + } + } else { + log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) + } + } + + if _, ok := sweeperRunList[s.Name]; ok { + log.Printf("[DEBUG] Sweeper (%s) already ran in region (%s)", s.Name, region) + return nil + } + + runE := s.F(region) + if runE == nil { + sweeperRunList[s.Name] = true + } else { + sweeperRunList[s.Name] = false + } + + return runE +} + +const TestEnvVar = "TF_ACC" + +// TestProvider can be implemented by any ResourceProvider to provide custom +// reset functionality at the start of an acceptance test. +// The helper/schema Provider implements this interface. +type TestProvider interface { + TestReset() error +} + +// TestCheckFunc is the callback type used with acceptance tests to check +// the state of a resource. The state passed in is the latest state known, +// or in the case of being after a destroy, it is the last known state when +// it was created. 
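Before the test-case types below, a small sketch (not part of the vendored files) of wiring up the sweeper plumbing added above; the sweeper name and cleanup logic are invented, and in practice this code would live in a _test.go file of the provider package.

package example

import (
	"log"
	"testing"

	"github.com/hashicorp/terraform/helper/resource"
)

func init() {
	// Sweepers only run when `go test` is invoked with -sweep, e.g.
	// `go test ./... -sweep=region-a,region-b`.
	resource.AddTestSweepers("example_thing", &resource.Sweeper{
		Name: "example_thing",
		F: func(region string) error {
			log.Printf("[DEBUG] sweeping leaked example_thing resources in %s", region)
			// A real sweeper would list and delete leaked resources here.
			return nil
		},
	})
}

// TestMain defers to the helper's TestMain so the -sweep and -sweep-run
// flags are honored.
func TestMain(m *testing.M) {
	resource.TestMain(m)
}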
+type TestCheckFunc func(*terraform.State) error + +// ImportStateCheckFunc is the check function for ImportState tests +type ImportStateCheckFunc func([]*terraform.InstanceState) error + +// ImportStateIdFunc is an ID generation function to help with complex ID +// generation for ImportState tests. +type ImportStateIdFunc func(*terraform.State) (string, error) + +// TestCase is a single acceptance test case used to test the apply/destroy +// lifecycle of a resource in a specific configuration. +// +// When the destroy plan is executed, the config from the last TestStep +// is used to plan it. +type TestCase struct { + // IsUnitTest allows a test to run regardless of the TF_ACC + // environment variable. This should be used with care - only for + // fast tests on local resources (e.g. remote state with a local + // backend) but can be used to increase confidence in correct + // operation of Terraform without waiting for a full acctest run. + IsUnitTest bool + + // PreCheck, if non-nil, will be called before any test steps are + // executed. It will only be executed in the case that the steps + // would run, so it can be used for some validation before running + // acceptance tests, such as verifying that keys are setup. + PreCheck func() + + // Providers is the ResourceProvider that will be under test. + // + // Alternately, ProviderFactories can be specified for the providers + // that are valid. This takes priority over Providers. + // + // The end effect of each is the same: specifying the providers that + // are used within the tests. + Providers map[string]terraform.ResourceProvider + ProviderFactories map[string]terraform.ResourceProviderFactory + + // PreventPostDestroyRefresh can be set to true for cases where data sources + // are tested alongside real resources + PreventPostDestroyRefresh bool + + // CheckDestroy is called after the resource is finally destroyed + // to allow the tester to test that the resource is truly gone. + CheckDestroy TestCheckFunc + + // Steps are the apply sequences done within the context of the + // same state. Each step can have its own check to verify correctness. + Steps []TestStep + + // The settings below control the "ID-only refresh test." This is + // an enabled-by-default test that tests that a refresh can be + // refreshed with only an ID to result in the same attributes. + // This validates completeness of Refresh. + // + // IDRefreshName is the name of the resource to check. This will + // default to the first non-nil primary resource in the state. + // + // IDRefreshIgnore is a list of configuration keys that will be ignored. + IDRefreshName string + IDRefreshIgnore []string +} + +// TestStep is a single apply sequence of a test, done within the +// context of a state. +// +// Multiple TestSteps can be sequenced in a Test to allow testing +// potentially complex update logic. In general, simply create/destroy +// tests will only need one step. +type TestStep struct { + // ResourceName should be set to the name of the resource + // that is being tested. Example: "aws_instance.foo". Various test + // modes use this to auto-detect state information. + // + // This is only required if the test mode settings below say it is + // for the mode you're using. + ResourceName string + + // PreConfig is called before the Config is applied to perform any per-step + // setup that needs to happen. This is called regardless of "test mode" + // below. 
+ PreConfig func() + + // Taint is a list of resource addresses to taint prior to the execution of + // the step. Be sure to only include this at a step where the referenced + // address will be present in state, as it will fail the test if the resource + // is missing. + // + // This option is ignored on ImportState tests, and currently only works for + // resources in the root module path. + Taint []string + + //--------------------------------------------------------------- + // Test modes. One of the following groups of settings must be + // set to determine what the test step will do. Ideally we would've + // used Go interfaces here but there are now hundreds of tests we don't + // want to re-type so instead we just determine which step logic + // to run based on what settings below are set. + //--------------------------------------------------------------- + + //--------------------------------------------------------------- + // Plan, Apply testing + //--------------------------------------------------------------- + + // Config a string of the configuration to give to Terraform. If this + // is set, then the TestCase will execute this step with the same logic + // as a `terraform apply`. + Config string + + // Check is called after the Config is applied. Use this step to + // make your own API calls to check the status of things, and to + // inspect the format of the ResourceState itself. + // + // If an error is returned, the test will fail. In this case, a + // destroy plan will still be attempted. + // + // If this is nil, no check is done on this step. + Check TestCheckFunc + + // Destroy will create a destroy plan if set to true. + Destroy bool + + // ExpectNonEmptyPlan can be set to true for specific types of tests that are + // looking to verify that a diff occurs + ExpectNonEmptyPlan bool + + // ExpectError allows the construction of test cases that we expect to fail + // with an error. The specified regexp must match against the error for the + // test to pass. + ExpectError *regexp.Regexp + + // PlanOnly can be set to only run `plan` with this configuration, and not + // actually apply it. This is useful for ensuring config changes result in + // no-op plans + PlanOnly bool + + // PreventDiskCleanup can be set to true for testing terraform modules which + // require access to disk at runtime. Note that this will leave files in the + // temp folder + PreventDiskCleanup bool + + // PreventPostDestroyRefresh can be set to true for cases where data sources + // are tested alongside real resources + PreventPostDestroyRefresh bool + + // SkipFunc is called before applying config, but after PreConfig + // This is useful for defining test steps with platform-dependent checks + SkipFunc func() (bool, error) + + //--------------------------------------------------------------- + // ImportState testing + //--------------------------------------------------------------- + + // ImportState, if true, will test the functionality of ImportState + // by importing the resource with ResourceName (must be set) and the + // ID of that resource. + ImportState bool + + // ImportStateId is the ID to perform an ImportState operation with. + // This is optional. If it isn't set, then the resource ID is automatically + // determined by inspecting the state for ResourceName's ID. + ImportStateId string + + // ImportStateIdPrefix is the prefix added in front of ImportStateId. + // This can be useful in complex import cases, where more than one + // attribute needs to be passed on as the Import ID. 
Mainly in cases + // where the ID is not known, and a known prefix needs to be added to + // the unset ImportStateId field. + ImportStateIdPrefix string + + // ImportStateIdFunc is a function that can be used to dynamically generate + // the ID for the ImportState tests. It is sent the state, which can be + // checked to derive the attributes necessary and generate the string in the + // desired format. + ImportStateIdFunc ImportStateIdFunc + + // ImportStateCheck checks the results of ImportState. It should be + // used to verify that the resulting value of ImportState has the + // proper resources, IDs, and attributes. + ImportStateCheck ImportStateCheckFunc + + // ImportStateVerify, if true, will also check that the state values + // that are finally put into the state after import match for all the + // IDs returned by the Import. Note that this checks for strict equality + // and does not respect DiffSuppressFunc or CustomizeDiff. + // + // ImportStateVerifyIgnore is a list of prefixes of fields that should + // not be verified to be equal. These can be set to ephemeral fields or + // fields that can't be refreshed and don't matter. + ImportStateVerify bool + ImportStateVerifyIgnore []string + + // provider s is used internally to maintain a reference to the + // underlying providers during the tests + providers map[string]terraform.ResourceProvider +} + +// Set to a file mask in sprintf format where %s is test name +const EnvLogPathMask = "TF_LOG_PATH_MASK" + +func LogOutput(t TestT) (logOutput io.Writer, err error) { + logOutput = ioutil.Discard + + logLevel := logging.CurrentLogLevel() + if logLevel == "" { + return + } + + logOutput = os.Stderr + + if logPath := os.Getenv(logging.EnvLogFile); logPath != "" { + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + if logPathMask := os.Getenv(EnvLogPathMask); logPathMask != "" { + // Escape special characters which may appear if we have subtests + testName := strings.Replace(t.Name(), "/", "__", -1) + + logPath := fmt.Sprintf(logPathMask, testName) + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + // This was the default since the beginning + logOutput = &logging.LevelFilter{ + Levels: logging.ValidLevels, + MinLevel: logging.LogLevel(logLevel), + Writer: logOutput, + } + + return +} + +// ParallelTest performs an acceptance test on a resource, allowing concurrency +// with other ParallelTest. +// +// Tests will fail if they do not properly handle conditions to allow multiple +// tests to occur against the same resource or service (e.g. random naming). +// All other requirements of the Test function also apply to this function. +func ParallelTest(t TestT, c TestCase) { + t.Parallel() + Test(t, c) +} + +// Test performs an acceptance test on a resource. +// +// Tests are not run unless an environmental variable "TF_ACC" is +// set to some non-empty value. This is to avoid test cases surprising +// a user by creating real resources. +// +// Tests will fail unless the verbose flag (`go test -v`, or explicitly +// the "-test.v" flag) is set. Because some acceptance tests take quite +// long, we require the verbose flag so users are able to see progress +// output. +func Test(t TestT, c TestCase) { + // We only run acceptance tests if an env var is set because they're + // slow and generally require some outside configuration. 
You can opt out + // of this with OverrideEnvVar on individual TestCases. + if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest { + t.Skip(fmt.Sprintf( + "Acceptance tests skipped unless env '%s' set", + TestEnvVar)) + return + } + + logWriter, err := LogOutput(t) + if err != nil { + t.Error(fmt.Errorf("error setting up logging: %s", err)) + } + log.SetOutput(logWriter) + + // We require verbose mode so that the user knows what is going on. + if !testTesting && !testing.Verbose() && !c.IsUnitTest { + t.Fatal("Acceptance tests must be run with the -v flag on tests") + return + } + + // Run the PreCheck if we have it + if c.PreCheck != nil { + c.PreCheck() + } + + // get instances of all providers, so we can use the individual + // resources to shim the state during the tests. + providers := make(map[string]terraform.ResourceProvider) + for name, pf := range testProviderFactories(c) { + p, err := pf() + if err != nil { + t.Fatal(err) + } + providers[name] = p + } + + providerResolver, err := testProviderResolver(c) + if err != nil { + t.Fatal(err) + } + + opts := terraform.ContextOpts{ProviderResolver: providerResolver} + + // A single state variable to track the lifecycle, starting with no state + var state *terraform.State + + // Go through each step and run it + var idRefreshCheck *terraform.ResourceState + idRefresh := c.IDRefreshName != "" + errored := false + for i, step := range c.Steps { + // insert the providers into the step so we can get the resources for + // shimming the state + step.providers = providers + + var err error + log.Printf("[DEBUG] Test: Executing step %d", i) + + if step.SkipFunc != nil { + skip, err := step.SkipFunc() + if err != nil { + t.Fatal(err) + } + if skip { + log.Printf("[WARN] Skipping step %d", i) + continue + } + } + + if step.Config == "" && !step.ImportState { + err = fmt.Errorf( + "unknown test mode for step. Please see TestStep docs\n\n%#v", + step) + } else { + if step.ImportState { + if step.Config == "" { + step.Config = testProviderConfig(c) + } + + // Can optionally set step.Config in addition to + // step.ImportState, to provide config for the import. + state, err = testStepImportState(opts, state, step) + } else { + state, err = testStepConfig(opts, state, step) + } + } + + // If we expected an error, but did not get one, fail + if err == nil && step.ExpectError != nil { + errored = true + t.Error(fmt.Sprintf( + "Step %d, no error received, but expected a match to:\n\n%s\n\n", + i, step.ExpectError)) + break + } + + // If there was an error, exit + if err != nil { + // Perhaps we expected an error? Check if it matches + if step.ExpectError != nil { + if !step.ExpectError.MatchString(err.Error()) { + errored = true + t.Error(fmt.Sprintf( + "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n", + i, err, step.ExpectError)) + break + } + } else { + errored = true + t.Error(fmt.Sprintf("Step %d error: %s", i, detailedErrorMessage(err))) + break + } + } + + // If we've never checked an id-only refresh and our state isn't + // empty, find the first resource and test it. + if idRefresh && idRefreshCheck == nil && !state.Empty() { + // Find the first non-nil resource in the state + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[c.IDRefreshName]; ok { + idRefreshCheck = v + } + + break + } + } + + // If we have an instance to check for refreshes, do it + // immediately. 
We do it in the middle of another test + // because it shouldn't affect the overall state (refresh + // is read-only semantically) and we want to fail early if + // this fails. If refresh isn't read-only, then this will have + // caught a different bug. + if idRefreshCheck != nil { + log.Printf( + "[WARN] Test: Running ID-only refresh check on %s", + idRefreshCheck.Primary.ID) + if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil { + log.Printf("[ERROR] Test: ID-only test failed: %s", err) + t.Error(fmt.Sprintf( + "[ERROR] Test: ID-only test failed: %s", err)) + break + } + } + } + } + + // If we never checked an id-only refresh, it is a failure. + if idRefresh { + if !errored && len(c.Steps) > 0 && idRefreshCheck == nil { + t.Error("ID-only refresh check never ran.") + } + } + + // If we have a state, then run the destroy + if state != nil { + lastStep := c.Steps[len(c.Steps)-1] + destroyStep := TestStep{ + Config: lastStep.Config, + Check: c.CheckDestroy, + Destroy: true, + PreventDiskCleanup: lastStep.PreventDiskCleanup, + PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, + providers: providers, + } + + log.Printf("[WARN] Test: Executing destroy step") + state, err := testStep(opts, state, destroyStep) + if err != nil { + t.Error(fmt.Sprintf( + "Error destroying resource! WARNING: Dangling resources\n"+ + "may exist. The full state and error is shown below.\n\n"+ + "Error: %s\n\nState: %s", + err, + state)) + } + } else { + log.Printf("[WARN] Skipping destroy test since there is no state.") + } +} + +// testProviderConfig takes the list of Providers in a TestCase and returns a +// config with only empty provider blocks. This is useful for Import, where no +// config is provided, but the providers must be defined. +func testProviderConfig(c TestCase) string { + var lines []string + for p := range c.Providers { + lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) + } + + return strings.Join(lines, "") +} + +// testProviderFactories combines the fixed Providers and +// ResourceProviderFactory functions into a single map of +// ResourceProviderFactory functions. +func testProviderFactories(c TestCase) map[string]terraform.ResourceProviderFactory { + ctxProviders := make(map[string]terraform.ResourceProviderFactory) + for k, pf := range c.ProviderFactories { + ctxProviders[k] = pf + } + + // add any fixed providers + for k, p := range c.Providers { + ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p) + } + return ctxProviders +} + +// testProviderResolver is a helper to build a ResourceProviderResolver +// with pre instantiated ResourceProviders, so that we can reset them for the +// test, while only calling the factory function once. +// Any errors are stored so that they can be returned by the factory in +// terraform to match non-test behavior. +func testProviderResolver(c TestCase) (providers.Resolver, error) { + ctxProviders := testProviderFactories(c) + + // wrap the old provider factories in the test grpc server so they can be + // called from terraform. + newProviders := make(map[addrs.Provider]providers.Factory) + + for k, pf := range ctxProviders { + factory := pf // must copy to ensure each closure sees its own value + newProviders[addrs.NewLegacyProvider(k)] = func() (providers.Interface, error) { + p, err := factory() + if err != nil { + return nil, err + } + + // The provider is wrapped in a GRPCTestProvider so that it can be + // passed back to terraform core as a providers.Interface, rather + // than the legacy ResourceProvider. 
+ return GRPCTestProvider(p), nil + } + } + + return providers.ResolverFixed(newProviders), nil +} + +// UnitTest is a helper to force the acceptance testing harness to run in the +// normal unit test suite. This should only be used for resource that don't +// have any external dependencies. +func UnitTest(t TestT, c TestCase) { + c.IsUnitTest = true + Test(t, c) +} + +func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error { + // TODO: We guard by this right now so master doesn't explode. We + // need to remove this eventually to make this part of the normal tests. + if os.Getenv("TF_ACC_IDONLY") == "" { + return nil + } + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: r.Type, + Name: "foo", + }.Instance(addrs.NoKey) + absAddr := addr.Absolute(addrs.RootModuleInstance) + + // Build the state. The state is just the resource with an ID. There + // are no attributes. We only set what is needed to perform a refresh. + state := states.NewState() + state.RootModule().SetResourceInstanceCurrent( + addr, + &states.ResourceInstanceObjectSrc{ + AttrsFlat: r.Primary.Attributes, + Status: states.ObjectReady, + }, + addrs.ProviderConfig{Type: addrs.NewLegacyProvider("placeholder")}.Absolute(addrs.RootModuleInstance), + ) + + // Create the config module. We use the full config because Refresh + // doesn't have access to it and we may need things like provider + // configurations. The initial implementation of id-only checks used + // an empty config module, but that caused the aforementioned problems. + cfg, err := testConfig(opts, step) + if err != nil { + return err + } + + // Initialize the context + opts.Config = cfg + opts.State = state + ctx, ctxDiags := terraform.NewContext(&opts) + if ctxDiags.HasErrors() { + return ctxDiags.Err() + } + if diags := ctx.Validate(); len(diags) > 0 { + if diags.HasErrors() { + return errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) + } + + log.Printf("[WARN] Config warnings:\n%s", diags.Err().Error()) + } + + // Refresh! + state, refreshDiags := ctx.Refresh() + if refreshDiags.HasErrors() { + return refreshDiags.Err() + } + + // Verify attribute equivalence. + actualR := state.ResourceInstance(absAddr) + if actualR == nil { + return fmt.Errorf("Resource gone!") + } + if actualR.Current == nil { + return fmt.Errorf("Resource has no primary instance") + } + actual := actualR.Current.AttrsFlat + expected := r.Primary.Attributes + // Remove fields we're ignoring + for _, v := range c.IDRefreshIgnore { + for k, _ := range actual { + if strings.HasPrefix(k, v) { + delete(actual, k) + } + } + for k, _ := range expected { + if strings.HasPrefix(k, v) { + delete(expected, k) + } + } + } + + if !reflect.DeepEqual(actual, expected) { + // Determine only the different attributes + for k, v := range expected { + if av, ok := actual[k]; ok && v == av { + delete(expected, k) + delete(actual, k) + } + } + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + return fmt.Errorf( + "Attributes not equivalent. Difference is shown below. 
Top is actual, bottom is expected."+ + "\n\n%s\n\n%s", + spewConf.Sdump(actual), spewConf.Sdump(expected)) + } + + return nil +} + +func testConfig(opts terraform.ContextOpts, step TestStep) (*configs.Config, error) { + if step.PreConfig != nil { + step.PreConfig() + } + + cfgPath, err := ioutil.TempDir("", "tf-test") + if err != nil { + return nil, fmt.Errorf("Error creating temporary directory for config: %s", err) + } + + if step.PreventDiskCleanup { + log.Printf("[INFO] Skipping defer os.RemoveAll call") + } else { + defer os.RemoveAll(cfgPath) + } + + // Write the main configuration file + err = ioutil.WriteFile(filepath.Join(cfgPath, "main.tf"), []byte(step.Config), os.ModePerm) + if err != nil { + return nil, fmt.Errorf("Error creating temporary file for config: %s", err) + } + + // Create directory for our child modules, if any. + modulesDir := filepath.Join(cfgPath, ".modules") + err = os.Mkdir(modulesDir, os.ModePerm) + if err != nil { + return nil, fmt.Errorf("Error creating child modules directory: %s", err) + } + + inst := initwd.NewModuleInstaller(modulesDir, nil) + _, installDiags := inst.InstallModules(cfgPath, true, initwd.ModuleInstallHooksImpl{}) + if installDiags.HasErrors() { + return nil, installDiags.Err() + } + + loader, err := configload.NewLoader(&configload.Config{ + ModulesDir: modulesDir, + }) + if err != nil { + return nil, fmt.Errorf("failed to create config loader: %s", err) + } + + config, configDiags := loader.LoadConfig(cfgPath) + if configDiags.HasErrors() { + return nil, configDiags + } + + return config, nil +} + +func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { + if c.ResourceName == "" { + return nil, fmt.Errorf("ResourceName must be set in TestStep") + } + + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[c.ResourceName]; ok { + return v, nil + } + } + } + + return nil, fmt.Errorf( + "Resource specified by ResourceName couldn't be found: %s", c.ResourceName) +} + +// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into +// a single TestCheckFunc. +// +// As a user testing their provider, this lets you decompose your checks +// into smaller pieces more easily. +func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + for i, f := range fs { + if err := f(s); err != nil { + return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err) + } + } + + return nil + } +} + +// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into +// a single TestCheckFunc. +// +// As a user testing their provider, this lets you decompose your checks +// into smaller pieces more easily. +// +// Unlike ComposeTestCheckFunc, ComposeAggergateTestCheckFunc runs _all_ of the +// TestCheckFuncs and aggregates failures. +func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + var result *multierror.Error + + for i, f := range fs { + if err := f(s); err != nil { + result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)) + } + } + + return result.ErrorOrNil() + } +} + +// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value +// exists in state for the given name/key combination. It is useful when +// testing that computed values were set, when it is not possible to +// know ahead of time what the values will be. 
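+//
+// A minimal usage sketch (the resource address and attribute name below are
+// hypothetical, not taken from this codebase):
+//
+//	TestCheckResourceAttrSet("example_thing.test", "id")
+//
+// only passes when the attribute is present and non-empty in state.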
+func TestCheckResourceAttrSet(name, key string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckResourceAttrSet(is, name, key) + } +} + +// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with +// support for non-root modules +func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckResourceAttrSet(is, name, key) + } +} + +func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error { + if val, ok := is.Attributes[key]; !ok || val == "" { + return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) + } + + return nil +} + +// TestCheckResourceAttr is a TestCheckFunc which validates +// the value in state for the given name/key combination. +func TestCheckResourceAttr(name, key, value string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckResourceAttr(is, name, key, value) + } +} + +// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with +// support for non-root modules +func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckResourceAttr(is, name, key, value) + } +} + +func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { + // Empty containers may be elided from the state. + // If the intent here is to check for an empty container, allow the key to + // also be non-existent. + emptyCheck := false + if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + emptyCheck = true + } + + if v, ok := is.Attributes[key]; !ok || v != value { + if emptyCheck && !ok { + return nil + } + + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", name, key) + } + + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + name, + key, + value, + v) + } + return nil +} + +// TestCheckNoResourceAttr is a TestCheckFunc which ensures that +// NO value exists in state for the given name/key combination. +func TestCheckNoResourceAttr(name, key string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckNoResourceAttr(is, name, key) + } +} + +// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with +// support for non-root modules +func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckNoResourceAttr(is, name, key) + } +} + +func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { + // Empty containers may sometimes be included in the state. + // If the intent here is to check for an empty container, allow the value to + // also be "0". 
+ emptyCheck := false + if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") { + emptyCheck = true + } + + val, exists := is.Attributes[key] + if emptyCheck && val == "0" { + return nil + } + + if exists { + return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) + } + + return nil +} + +// TestMatchResourceAttr is a TestCheckFunc which checks that the value +// in state for the given name/key combination matches the given regex. +func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testMatchResourceAttr(is, name, key, r) + } +} + +// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with +// support for non-root modules +func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testMatchResourceAttr(is, name, key, r) + } +} + +func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error { + if !r.MatchString(is.Attributes[key]) { + return fmt.Errorf( + "%s: Attribute '%s' didn't match %q, got %#v", + name, + key, + r.String(), + is.Attributes[key]) + } + + return nil +} + +// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the +// value is a pointer so that it can be updated while the test is running. +// It will only be dereferenced at the point this step is run. +func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc { + return func(s *terraform.State) error { + return TestCheckResourceAttr(name, key, *value)(s) + } +} + +// TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with +// support for non-root modules +func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc { + return func(s *terraform.State) error { + return TestCheckModuleResourceAttr(mp, name, key, *value)(s) + } +} + +// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values +// in state for a pair of name/key combinations are equal. 
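+//
+// For example (hypothetical resource addresses), a check such as
+//
+//	TestCheckResourceAttrPair("example_a.foo", "project_id", "example_b.bar", "id")
+//
+// asserts that the first resource references the ID of the second.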
+func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { + return func(s *terraform.State) error { + isFirst, err := primaryInstanceState(s, nameFirst) + if err != nil { + return err + } + + isSecond, err := primaryInstanceState(s, nameSecond) + if err != nil { + return err + } + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + } +} + +// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with +// support for non-root modules +func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { + mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim() + mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim() + return func(s *terraform.State) error { + isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst) + if err != nil { + return err + } + + isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond) + if err != nil { + return err + } + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + } +} + +func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { + vFirst, okFirst := isFirst.Attributes[keyFirst] + vSecond, okSecond := isSecond.Attributes[keySecond] + + // Container count values of 0 should not be relied upon, and not reliably + // maintained by helper/schema. For the purpose of tests, consider unset and + // 0 to be equal. + if len(keyFirst) > 2 && len(keySecond) > 2 && keyFirst[len(keyFirst)-2:] == keySecond[len(keySecond)-2:] && + (strings.HasSuffix(keyFirst, ".#") || strings.HasSuffix(keyFirst, ".%")) { + // they have the same suffix, and it is a collection count key. + if vFirst == "0" || vFirst == "" { + okFirst = false + } + if vSecond == "0" || vSecond == "" { + okSecond = false + } + } + + if okFirst != okSecond { + if !okFirst { + return fmt.Errorf("%s: Attribute %q not set, but %q is set in %s as %q", nameFirst, keyFirst, keySecond, nameSecond, vSecond) + } + return fmt.Errorf("%s: Attribute %q is %q, but %q is not set in %s", nameFirst, keyFirst, vFirst, keySecond, nameSecond) + } + if !(okFirst || okSecond) { + // If they both don't exist then they are equally unset, so that's okay. + return nil + } + + if vFirst != vSecond { + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + nameFirst, + keyFirst, + vSecond, + vFirst) + } + + return nil +} + +// TestCheckOutput checks an output in the Terraform configuration +func TestCheckOutput(name, value string) TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Outputs[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if rs.Value != value { + return fmt.Errorf( + "Output '%s': expected %#v, got %#v", + name, + value, + rs) + } + + return nil + } +} + +func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Outputs[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if !r.MatchString(rs.Value.(string)) { + return fmt.Errorf( + "Output '%s': %#v didn't match %q", + name, + rs, + r.String()) + } + + return nil + } +} + +// TestT is the interface used to handle the test lifecycle of a test. 
+// +// Users should just use a *testing.T object, which implements this. +type TestT interface { + Error(args ...interface{}) + Fatal(args ...interface{}) + Skip(args ...interface{}) + Name() string + Parallel() +} + +// This is set to true by unit tests to alter some behavior +var testTesting = false + +// modulePrimaryInstanceState returns the instance state for the given resource +// name in a ModuleState +func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { + rs, ok := ms.Resources[name] + if !ok { + return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) + } + + is := rs.Primary + if is == nil { + return nil, fmt.Errorf("No primary instance: %s in %s", name, ms.Path) + } + + return is, nil +} + +// modulePathPrimaryInstanceState returns the primary instance state for the +// given resource name in a given module path. +func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) { + ms := s.ModuleByPath(mp) + if ms == nil { + return nil, fmt.Errorf("No module found at: %s", mp) + } + + return modulePrimaryInstanceState(s, ms, name) +} + +// primaryInstanceState returns the primary instance state for the given +// resource name in the root module. +func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { + ms := s.RootModule() + return modulePrimaryInstanceState(s, ms, name) +} + +// operationError is a specialized implementation of error used to describe +// failures during one of the several operations performed for a particular +// test case. +type operationError struct { + OpName string + Diags tfdiags.Diagnostics +} + +func newOperationError(opName string, diags tfdiags.Diagnostics) error { + return operationError{opName, diags} +} + +// Error returns a terse error string containing just the basic diagnostic +// messages, for situations where normal Go error behavior is appropriate. +func (err operationError) Error() string { + return fmt.Sprintf("errors during %s: %s", err.OpName, err.Diags.Err().Error()) +} + +// ErrorDetail is like Error except it includes verbosely-rendered diagnostics +// similar to what would come from a normal Terraform run, which include +// additional context not included in Error(). +func (err operationError) ErrorDetail() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "errors during %s:", err.OpName) + clr := &colorstring.Colorize{Disable: true, Colors: colorstring.DefaultColors} + for _, diag := range err.Diags { + diagStr := format.Diagnostic(diag, nil, clr, 78) + buf.WriteByte('\n') + buf.WriteString(diagStr) + } + return buf.String() +} + +// detailedErrorMessage is a helper for calling ErrorDetail on an error if +// it is an operationError or just taking Error otherwise. 
+func detailedErrorMessage(err error) string { + switch tErr := err.(type) { + case operationError: + return tErr.ErrorDetail() + default: + return err.Error() + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go new file mode 100644 index 00000000000..c3893798d23 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go @@ -0,0 +1,404 @@ +package resource + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "log" + "sort" + "strings" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/states" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/tfdiags" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/terraform" +) + +// testStepConfig runs a config-mode test step +func testStepConfig( + opts terraform.ContextOpts, + state *terraform.State, + step TestStep) (*terraform.State, error) { + return testStep(opts, state, step) +} + +func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) { + if !step.Destroy { + if err := testStepTaint(state, step); err != nil { + return state, err + } + } + + cfg, err := testConfig(opts, step) + if err != nil { + return state, err + } + + var stepDiags tfdiags.Diagnostics + + // Build the context + opts.Config = cfg + opts.State, err = terraform.ShimLegacyState(state) + if err != nil { + return nil, err + } + + opts.Destroy = step.Destroy + ctx, stepDiags := terraform.NewContext(&opts) + if stepDiags.HasErrors() { + return state, fmt.Errorf("Error initializing context: %s", stepDiags.Err()) + } + if stepDiags := ctx.Validate(); len(stepDiags) > 0 { + if stepDiags.HasErrors() { + return state, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err()) + } + + log.Printf("[WARN] Config warnings:\n%s", stepDiags) + } + + // Refresh! + newState, stepDiags := ctx.Refresh() + // shim the state first so the test can check the state on errors + + state, err = shimNewState(newState, step.providers) + if err != nil { + return nil, err + } + if stepDiags.HasErrors() { + return state, newOperationError("refresh", stepDiags) + } + + // If this step is a PlanOnly step, skip over this first Plan and subsequent + // Apply, and use the follow up Plan that checks for perpetual diffs + if !step.PlanOnly { + // Plan! + if p, stepDiags := ctx.Plan(); stepDiags.HasErrors() { + return state, newOperationError("plan", stepDiags) + } else { + log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes)) + } + + // We need to keep a copy of the state prior to destroying + // such that destroy steps can verify their behavior in the check + // function + stateBeforeApplication := state.DeepCopy() + + // Apply the diff, creating real resources. 
+ newState, stepDiags = ctx.Apply() + // shim the state first so the test can check the state on errors + state, err = shimNewState(newState, step.providers) + if err != nil { + return nil, err + } + if stepDiags.HasErrors() { + return state, newOperationError("apply", stepDiags) + } + + // Run any configured checks + if step.Check != nil { + if step.Destroy { + if err := step.Check(stateBeforeApplication); err != nil { + return state, fmt.Errorf("Check failed: %s", err) + } + } else { + if err := step.Check(state); err != nil { + return state, fmt.Errorf("Check failed: %s", err) + } + } + } + } + + // Now, verify that Plan is now empty and we don't have a perpetual diff issue + // We do this with TWO plans. One without a refresh. + var p *plans.Plan + if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { + return state, newOperationError("follow-up plan", stepDiags) + } + if !p.Changes.Empty() { + if step.ExpectNonEmptyPlan { + log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) + } else { + return state, fmt.Errorf( + "After applying this step, the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) + } + } + + // And another after a Refresh. + if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { + newState, stepDiags = ctx.Refresh() + if stepDiags.HasErrors() { + return state, newOperationError("follow-up refresh", stepDiags) + } + + state, err = shimNewState(newState, step.providers) + if err != nil { + return nil, err + } + } + if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { + return state, newOperationError("second follow-up refresh", stepDiags) + } + empty := p.Changes.Empty() + + // Data resources are tricky because they legitimately get instantiated + // during refresh so that they will be already populated during the + // plan walk. Because of this, if we have any data resources in the + // config we'll end up wanting to destroy them again here. This is + // acceptable and expected, and we'll treat it as "empty" for the + // sake of this testing. + if step.Destroy && !empty { + empty = true + for _, change := range p.Changes.Resources { + if change.Addr.Resource.Resource.Mode != addrs.DataResourceMode { + empty = false + break + } + } + } + + if !empty { + if step.ExpectNonEmptyPlan { + log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) + } else { + return state, fmt.Errorf( + "After applying this step and refreshing, "+ + "the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) + } + } + + // Made it here, but expected a non-empty plan, fail! + if step.ExpectNonEmptyPlan && empty { + return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!") + } + + // Made it here? Good job test step! + return state, nil +} + +// legacyPlanComparisonString produces a string representation of the changes +// from a plan and a given state togther, as was formerly produced by the +// String method of terraform.Plan. +// +// This is here only for compatibility with existing tests that predate our +// new plan and state types, and should not be used in new tests. Instead, use +// a library like "cmp" to do a deep equality and diff on the two +// data structures. 
+func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string { + return fmt.Sprintf( + "DIFF:\n\n%s\n\nSTATE:\n\n%s", + legacyDiffComparisonString(changes), + state.String(), + ) +} + +// legacyDiffComparisonString produces a string representation of the changes +// from a planned changes object, as was formerly produced by the String method +// of terraform.Diff. +// +// This is here only for compatibility with existing tests that predate our +// new plan types, and should not be used in new tests. Instead, use a library +// like "cmp" to do a deep equality check and diff on the two data structures. +func legacyDiffComparisonString(changes *plans.Changes) string { + // The old string representation of a plan was grouped by module, but + // our new plan structure is not grouped in that way and so we'll need + // to preprocess it in order to produce that grouping. + type ResourceChanges struct { + Current *plans.ResourceInstanceChangeSrc + Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc + } + byModule := map[string]map[string]*ResourceChanges{} + resourceKeys := map[string][]string{} + requiresReplace := map[string][]string{} + var moduleKeys []string + for _, rc := range changes.Resources { + if rc.Action == plans.NoOp { + // We won't mention no-op changes here at all, since the old plan + // model we are emulating here didn't have such a concept. + continue + } + moduleKey := rc.Addr.Module.String() + if _, exists := byModule[moduleKey]; !exists { + moduleKeys = append(moduleKeys, moduleKey) + byModule[moduleKey] = make(map[string]*ResourceChanges) + } + resourceKey := rc.Addr.Resource.String() + if _, exists := byModule[moduleKey][resourceKey]; !exists { + resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey) + byModule[moduleKey][resourceKey] = &ResourceChanges{ + Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc), + } + } + + if rc.DeposedKey == states.NotDeposed { + byModule[moduleKey][resourceKey].Current = rc + } else { + byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc + } + + rr := []string{} + for _, p := range rc.RequiredReplace.List() { + rr = append(rr, hcl2shim.FlatmapKeyFromPath(p)) + } + requiresReplace[resourceKey] = rr + } + sort.Strings(moduleKeys) + for _, ks := range resourceKeys { + sort.Strings(ks) + } + + var buf bytes.Buffer + + for _, moduleKey := range moduleKeys { + rcs := byModule[moduleKey] + var mBuf bytes.Buffer + + for _, resourceKey := range resourceKeys[moduleKey] { + rc := rcs[resourceKey] + + forceNewAttrs := requiresReplace[resourceKey] + + crud := "UPDATE" + if rc.Current != nil { + switch rc.Current.Action { + case plans.DeleteThenCreate: + crud = "DESTROY/CREATE" + case plans.CreateThenDelete: + crud = "CREATE/DESTROY" + case plans.Delete: + crud = "DESTROY" + case plans.Create: + crud = "CREATE" + } + } else { + // We must be working on a deposed object then, in which + // case destroying is the only possible action. 
+ crud = "DESTROY" + } + + extra := "" + if rc.Current == nil && len(rc.Deposed) > 0 { + extra = " (deposed only)" + } + + fmt.Fprintf( + &mBuf, "%s: %s%s\n", + crud, resourceKey, extra, + ) + + attrNames := map[string]bool{} + var oldAttrs map[string]string + var newAttrs map[string]string + if rc.Current != nil { + if before := rc.Current.Before; before != nil { + ty, err := before.ImpliedType() + if err == nil { + val, err := before.Decode(ty) + if err == nil { + oldAttrs = hcl2shim.FlatmapValueFromHCL2(val) + for k := range oldAttrs { + attrNames[k] = true + } + } + } + } + if after := rc.Current.After; after != nil { + ty, err := after.ImpliedType() + if err == nil { + val, err := after.Decode(ty) + if err == nil { + newAttrs = hcl2shim.FlatmapValueFromHCL2(val) + for k := range newAttrs { + attrNames[k] = true + } + } + } + } + } + if oldAttrs == nil { + oldAttrs = make(map[string]string) + } + if newAttrs == nil { + newAttrs = make(map[string]string) + } + + attrNamesOrder := make([]string, 0, len(attrNames)) + keyLen := 0 + for n := range attrNames { + attrNamesOrder = append(attrNamesOrder, n) + if len(n) > keyLen { + keyLen = len(n) + } + } + sort.Strings(attrNamesOrder) + + for _, attrK := range attrNamesOrder { + v := newAttrs[attrK] + u := oldAttrs[attrK] + + if v == hcl2shim.UnknownVariableValue { + v = "" + } + // NOTE: we don't support here because we would + // need schema to do that. Excluding sensitive values + // is now done at the UI layer, and so should not be tested + // at the core layer. + + updateMsg := "" + + // This may not be as precise as in the old diff, as it matches + // everything under the attribute that was originally marked as + // ForceNew, but should help make it easier to determine what + // caused replacement here. 
+ for _, k := range forceNewAttrs { + if strings.HasPrefix(attrK, k) { + updateMsg = " (forces new resource)" + break + } + } + + fmt.Fprintf( + &mBuf, " %s:%s %#v => %#v%s\n", + attrK, + strings.Repeat(" ", keyLen-len(attrK)), + u, v, + updateMsg, + ) + } + } + + if moduleKey == "" { // root module + buf.Write(mBuf.Bytes()) + buf.WriteByte('\n') + continue + } + + fmt.Fprintf(&buf, "%s:\n", moduleKey) + s := bufio.NewScanner(&mBuf) + for s.Scan() { + buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) + } + } + + return buf.String() +} + +func testStepTaint(state *terraform.State, step TestStep) error { + for _, p := range step.Taint { + m := state.RootModule() + if m == nil { + return errors.New("no state") + } + rs, ok := m.Resources[p] + if !ok { + return fmt.Errorf("resource %q not found in state", p) + } + log.Printf("[WARN] Test: Explicitly tainting resource %q", p) + rs.Taint() + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go new file mode 100644 index 00000000000..9a3ef1be029 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go @@ -0,0 +1,232 @@ +package resource + +import ( + "fmt" + "log" + "reflect" + "strings" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" +) + +// testStepImportState runs an import state test step +func testStepImportState( + opts terraform.ContextOpts, + state *terraform.State, + step TestStep) (*terraform.State, error) { + + // Determine the ID to import + var importId string + switch { + case step.ImportStateIdFunc != nil: + var err error + importId, err = step.ImportStateIdFunc(state) + if err != nil { + return state, err + } + case step.ImportStateId != "": + importId = step.ImportStateId + default: + resource, err := testResource(step, state) + if err != nil { + return state, err + } + importId = resource.Primary.ID + } + + importPrefix := step.ImportStateIdPrefix + if importPrefix != "" { + importId = fmt.Sprintf("%s%s", importPrefix, importId) + } + + // Setup the context. We initialize with an empty state. We use the + // full config for provider configurations. + cfg, err := testConfig(opts, step) + if err != nil { + return state, err + } + + opts.Config = cfg + + // import tests start with empty state + opts.State = states.NewState() + + ctx, stepDiags := terraform.NewContext(&opts) + if stepDiags.HasErrors() { + return state, stepDiags.Err() + } + + // The test step provides the resource address as a string, so we need + // to parse it to get an addrs.AbsResourceAddress to pass in to the + // import method. 
+ traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(step.ResourceName), "", hcl.Pos{}) + if hclDiags.HasErrors() { + return nil, hclDiags + } + importAddr, stepDiags := addrs.ParseAbsResourceInstance(traversal) + if stepDiags.HasErrors() { + return nil, stepDiags.Err() + } + + // Do the import + importedState, stepDiags := ctx.Import(&terraform.ImportOpts{ + // Set the module so that any provider config is loaded + Config: cfg, + + Targets: []*terraform.ImportTarget{ + &terraform.ImportTarget{ + Addr: importAddr, + ID: importId, + }, + }, + }) + if stepDiags.HasErrors() { + log.Printf("[ERROR] Test: ImportState failure: %s", stepDiags.Err()) + return state, stepDiags.Err() + } + + newState, err := shimNewState(importedState, step.providers) + if err != nil { + return nil, err + } + // Go through the new state and verify + if step.ImportStateCheck != nil { + var states []*terraform.InstanceState + for _, r := range newState.RootModule().Resources { + if r.Primary != nil { + is := r.Primary.DeepCopy() + is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type + states = append(states, is) + } + } + if err := step.ImportStateCheck(states); err != nil { + return state, err + } + } + + // Verify that all the states match + if step.ImportStateVerify { + new := newState.RootModule().Resources + old := state.RootModule().Resources + for _, r := range new { + // Find the existing resource + var oldR *terraform.ResourceState + for _, r2 := range old { + if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type { + oldR = r2 + break + } + } + if oldR == nil { + return state, fmt.Errorf( + "Failed state verification, resource with ID %s not found", + r.Primary.ID) + } + + // We'll try our best to find the schema for this resource type + // so we can ignore Removed fields during validation. If we fail + // to find the schema then we won't ignore them and so the test + // will need to rely on explicit ImportStateVerifyIgnore, though + // this shouldn't happen in any reasonable case. + var rsrcSchema *schema.Resource + if providerAddr, diags := addrs.ParseAbsProviderConfigStr(r.Provider); !diags.HasErrors() { + providerType := providerAddr.ProviderConfig.Type.LegacyString() + if provider, ok := step.providers[providerType]; ok { + if provider, ok := provider.(*schema.Provider); ok { + rsrcSchema = provider.ResourcesMap[r.Type] + } + } + } + + // don't add empty flatmapped containers, so we can more easily + // compare the attributes + skipEmpty := func(k, v string) bool { + if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") { + if v == "0" { + return true + } + } + return false + } + + // Compare their attributes + actual := make(map[string]string) + for k, v := range r.Primary.Attributes { + if skipEmpty(k, v) { + continue + } + actual[k] = v + } + + expected := make(map[string]string) + for k, v := range oldR.Primary.Attributes { + if skipEmpty(k, v) { + continue + } + expected[k] = v + } + + // Remove fields we're ignoring + for _, v := range step.ImportStateVerifyIgnore { + for k := range actual { + if strings.HasPrefix(k, v) { + delete(actual, k) + } + } + for k := range expected { + if strings.HasPrefix(k, v) { + delete(expected, k) + } + } + } + + // Also remove any attributes that are marked as "Removed" in the + // schema, if we have a schema to check that against. 
+ if rsrcSchema != nil { + for k := range actual { + for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { + if schema.Removed != "" { + delete(actual, k) + break + } + } + } + for k := range expected { + for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { + if schema.Removed != "" { + delete(expected, k) + break + } + } + } + } + + if !reflect.DeepEqual(actual, expected) { + // Determine only the different attributes + for k, v := range expected { + if av, ok := actual[k]; ok && v == av { + delete(expected, k) + delete(actual, k) + } + } + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + return state, fmt.Errorf( + "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ + "\n\n%s\n\n%s", + spewConf.Sdump(actual), spewConf.Sdump(expected)) + } + } + } + + // Return the old state (non-imported) so we don't change anything. + return state, nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go new file mode 100644 index 00000000000..e56a5155d10 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go @@ -0,0 +1,84 @@ +package resource + +import ( + "sync" + "time" +) + +// Retry is a basic wrapper around StateChangeConf that will just retry +// a function until it no longer returns an error. +func Retry(timeout time.Duration, f RetryFunc) error { + // These are used to pull the error out of the function; need a mutex to + // avoid a data race. + var resultErr error + var resultErrMu sync.Mutex + + c := &StateChangeConf{ + Pending: []string{"retryableerror"}, + Target: []string{"success"}, + Timeout: timeout, + MinTimeout: 500 * time.Millisecond, + Refresh: func() (interface{}, string, error) { + rerr := f() + + resultErrMu.Lock() + defer resultErrMu.Unlock() + + if rerr == nil { + resultErr = nil + return 42, "success", nil + } + + resultErr = rerr.Err + + if rerr.Retryable { + return 42, "retryableerror", nil + } + return nil, "quit", rerr.Err + }, + } + + _, waitErr := c.WaitForState() + + // Need to acquire the lock here to be able to avoid race using resultErr as + // the return value + resultErrMu.Lock() + defer resultErrMu.Unlock() + + // resultErr may be nil because the wait timed out and resultErr was never + // set; this is still an error + if resultErr == nil { + return waitErr + } + // resultErr takes precedence over waitErr if both are set because it is + // more likely to be useful + return resultErr +} + +// RetryFunc is the function retried until it succeeds. +type RetryFunc func() *RetryError + +// RetryError is the required return type of RetryFunc. It forces client code +// to choose whether or not a given error is retryable. +type RetryError struct { + Err error + Retryable bool +} + +// RetryableError is a helper to create a RetryError that's retryable from a +// given error. +func RetryableError(err error) *RetryError { + if err == nil { + return nil + } + return &RetryError{Err: err, Retryable: true} +} + +// NonRetryableError is a helper to create a RetryError that's _not_ retryable +// from a given error. 
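+//
+// A typical RetryFunc (sketch; doSomething and isTransient are hypothetical
+// helpers) wraps transient failures in RetryableError and permanent ones in
+// NonRetryableError, so that Retry keeps polling only while the failure is
+// considered transient:
+//
+//	err := Retry(2*time.Minute, func() *RetryError {
+//		err := doSomething()
+//		if err == nil {
+//			return nil
+//		}
+//		if isTransient(err) {
+//			return RetryableError(err)
+//		}
+//		return NonRetryableError(err)
+//	})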
+func NonRetryableError(err error) *RetryError { + if err == nil { + return nil + } + return &RetryError{Err: err, Retryable: false} +} diff --git a/vendor/github.com/packethost/packngo/.drone.yml b/vendor/github.com/packethost/packngo/.drone.yml new file mode 100644 index 00000000000..84cd7b6dbc0 --- /dev/null +++ b/vendor/github.com/packethost/packngo/.drone.yml @@ -0,0 +1,28 @@ +workspace: + base: /go + path: src/github.com/packethost/packngo + +pipeline: + lint: + image: golang:1.11 + commands: + - go get -v -u github.com/alecthomas/gometalinter + - gometalinter --install + - go get -v -t ./... + - | + gometalinter --disable=gas ./... || : + - | + gometalinter --disable-all --enable=gas ./... || : + - | + gofmt -d . | (! grep '.') || ok=false + - if ! $ok; then exit 1; fi + + build: + image: golang:1.11 + commands: + - go build -i -v ./... + + test: + image: golang:1.11 + commands: + - go test ./... diff --git a/vendor/github.com/packethost/packngo/.gitignore b/vendor/github.com/packethost/packngo/.gitignore new file mode 100644 index 00000000000..55510dc77cf --- /dev/null +++ b/vendor/github.com/packethost/packngo/.gitignore @@ -0,0 +1,29 @@ +### Go template +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.swp + +# idea +.idea**/** diff --git a/vendor/github.com/packethost/packngo/CHANGELOG.md b/vendor/github.com/packethost/packngo/CHANGELOG.md new file mode 100644 index 00000000000..34e5d311404 --- /dev/null +++ b/vendor/github.com/packethost/packngo/CHANGELOG.md @@ -0,0 +1,54 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). +This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +This release contains a bunch of fixes to the package api after some more real +world use. There a few breaks in backwards compatibility, but we are tying to +minimize them and move towards a 1.0 release. 
+ +### Added +- "acceptance" tests which run against production api (will incur charges) +- HardwareReservation to Device +- RootPassword to Device +- Spot market support +- Management and Manageable fields to discern between Elastic IPs and device unique IP +- Support for Volume attachments to Device and Volume +- Support for ProvisionEvents +- DoRequest sugar to Client +- Add ListProject function to the SSHKeys interface +- Operations for switching between Network Modes, aka "L2 support" + Support for Organization, Payment Method and Billing address resources + +### Fixed +- User.Emails json tag is fixed to match api response +- Single error object api response is now handled correctly + +### Changed +- IPService was split to DeviceIPService and ProjectIPService +- Renamed Device.IPXEScriptUrl -> Device.IPXEScriptURL +- Renamed DeviceCreateRequest.HostName -> DeviceCreateRequest.Hostname +- Renamed DeviceCreateRequest.IPXEScriptUrl -> DeviceCreateRequest.IPXEScriptURL +- Renamed DeviceUpdateRequest.HostName -> DeviceUpdateRequest.Hostname +- Renamed DeviceUpdateRequest.IPXEScriptUrl -> DeviceUpdateRequest.IPXEScriptURL +- Sync with packet.net api change to /projects/{id}/ips which no longer returns + the address in CIDR form +- Removed package level exported functions that should have never existed + +## [0.1.0] - 2017-08-17 + +Initial release, supports most of the api for interacting with: + +- Plans +- Users +- Emails +- SSH Keys +- Devices +- Projects +- Facilities +- Operating Systems +- IP Reservations +- Volumes diff --git a/vendor/github.com/packethost/packngo/LICENSE.txt b/vendor/github.com/packethost/packngo/LICENSE.txt new file mode 100644 index 00000000000..57c50110ca2 --- /dev/null +++ b/vendor/github.com/packethost/packngo/LICENSE.txt @@ -0,0 +1,56 @@ +Copyright (c) 2014 The packngo AUTHORS. All rights reserved. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +====================== +Portions of the client are based on code at: +https://github.com/google/go-github/ and +https://github.com/digitalocean/godo + +Copyright (c) 2013 The go-github AUTHORS. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/packethost/packngo/README.md b/vendor/github.com/packethost/packngo/README.md
new file mode 100644
index 00000000000..307c4eef767
--- /dev/null
+++ b/vendor/github.com/packethost/packngo/README.md
@@ -0,0 +1,127 @@
+# packngo
+Packet Go API Client
+
+![](https://www.packet.net/media/images/xeiw-packettwitterprofilew.png)
+
+
+Installation
+------------
+
+`go get github.com/packethost/packngo`
+
+Usage
+-----
+
+To authenticate to the Packet API, you must have your API token exported in the env var `PACKET_AUTH_TOKEN`.
+
+This code snippet initializes a Packet API client and lists your Projects:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/packethost/packngo"
+)
+
+func main() {
+	c, err := packngo.NewClient()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	ps, _, err := c.Projects.List(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, p := range ps {
+		log.Println(p.ID, p.Name)
+	}
+}
+
+```
+
+This lib is used by the official [terraform-provider-packet](https://github.com/terraform-providers/terraform-provider-packet).
+
+You can also learn a lot from the `*_test.go` sources. Almost all our tests touch the Packet API, so you can see how auth, querying and POSTing work. For example [devices_test.go](devices_test.go).
+
+
+Linked resources in Get\* and List\* functions
+----------------------------------------------
+Most of the Get and List functions take `*GetOptions` and `*ListOptions` parameters, respectively. If you supply them, you can specify which attributes of the resources in the return set should be excluded or included. This is useful for linked resources, e.g. members of a project, devices in a project.
+
+Linked resources usually have only the `Href` attribute populated, allowing you to fetch them in another API call. But if you explicitly `include` the linked resource attribute, it will be populated in the result set of the linking resource.
+
+For example, if you want to list users in a project, you can fetch the project via a `Projects.Get(pid, nil)` call. The result of the call will be a Project struct which has a `Users []User` attribute. The items in the `[]User` slice only have the URL attribute non-zero; the rest of the fields will be type defaults. You can then parse the IDs of the User resources and fetch them subsequently.
Or, you can use the ListOptions struct in the project fetch call to include the Users (`members` JSON tag) as
+
+```go
+Projects.Get(pid, &packngo.ListOptions{Includes: []string{"members"}})
+```
+
+Then, every item in the `[]User` slice will have all (not only the URL) attributes populated. The following code illustrates the Includes and Excludes.
+
+
+
+```go
+import (
+	"log"
+
+	"github.com/packethost/packngo"
+)
+
+func listProjectsAndUsers(lo *packngo.ListOptions) {
+	c, err := packngo.NewClient()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	ps, _, err := c.Projects.List(lo)
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("Listing for listOptions %+v\n", lo)
+	for _, p := range ps {
+		log.Printf("project resource %s has %d users", p.Name, len(p.Users))
+		for _, u := range p.Users {
+			if u.Email != "" && u.FullName != "" {
+				log.Printf(" user %s has email %s\n", u.FullName, u.Email)
+			} else {
+				log.Printf(" only got user link %s\n", u.URL)
+			}
+		}
+	}
+}
+
+func main() {
+	loMembers := &packngo.ListOptions{Includes: []string{"members"}}
+	loMembersOut := &packngo.ListOptions{Excludes: []string{"members"}}
+	listProjectsAndUsers(loMembers)
+	listProjectsAndUsers(nil)
+	listProjectsAndUsers(loMembersOut)
+}
+```
+
+
+Acceptance Tests
+----------------
+
+If you want to run tests against the actual Packet API, you must set the env var `PACKNGO_TEST_ACTUAL_API` to a non-empty string for the `go test` run. The device tests wait for device creation, so they take a while; it's best to run a few in parallel.
+
+To run a particular test, you can do
+
+```
+$ PACKNGO_TEST_ACTUAL_API=1 go test -v -run=TestAccDeviceBasic
+```
+
+If you want to see HTTP requests, set the `PACKNGO_DEBUG` env var to a non-empty string, for example:
+
+```
+$ PACKNGO_DEBUG=1 PACKNGO_TEST_ACTUAL_API=1 go test -v -run=TestAccVolumeUpdate
+```
+
+
+Committing
+----------
+
+Before committing, it's a good idea to run `gofmt -w *.go`.
([gofmt](https://golang.org/cmd/gofmt/)) diff --git a/vendor/github.com/packethost/packngo/batches.go b/vendor/github.com/packethost/packngo/batches.go new file mode 100644 index 00000000000..ca314b37f34 --- /dev/null +++ b/vendor/github.com/packethost/packngo/batches.go @@ -0,0 +1,97 @@ +package packngo + +import ( + "fmt" +) + +const batchBasePath = "/batches" + +// BatchService interface defines available batch methods +type BatchService interface { + Get(batchID string, getOpt *GetOptions) (*Batch, *Response, error) + List(ProjectID string, listOpt *ListOptions) ([]Batch, *Response, error) + Create(projectID string, batches *BatchCreateRequest) ([]Batch, *Response, error) + Delete(string, bool) (*Response, error) +} + +// Batch type +type Batch struct { + ID string `json:"id"` + State string `json:"state,omitempty"` + Quantity int32 `json:"quantity,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + Href string `json:"href,omitempty"` + Project Href `json:"project,omitempty"` + Devices []Device `json:"devices,omitempty"` +} + +//BatchesList represents collection of batches +type batchesList struct { + Batches []Batch `json:"batches,omitempty"` +} + +// BatchCreateRequest type used to create batch of device instances +type BatchCreateRequest struct { + Batches []BatchCreateDevice `json:"batches"` +} + +// BatchCreateDevice type used to describe batch instances +type BatchCreateDevice struct { + DeviceCreateRequest + Quantity int32 `json:"quantity"` + FacilityDiversityLevel int32 `json:"facility_diversity_level,omitempty"` +} + +// BatchServiceOp implements BatchService +type BatchServiceOp struct { + client *Client +} + +// Get returns batch details +func (s *BatchServiceOp) Get(batchID string, getOpt *GetOptions) (*Batch, *Response, error) { + params := createGetOptionsURL(getOpt) + path := fmt.Sprintf("%s/%s?%s", batchBasePath, batchID, params) + batch := new(Batch) + + resp, err := s.client.DoRequest("GET", path, nil, batch) + if err != nil { + return nil, resp, err + } + + return batch, resp, err +} + +// List returns batches on a project +func (s *BatchServiceOp) List(projectID string, listOpt *ListOptions) (batches []Batch, resp *Response, err error) { + params := createListOptionsURL(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, batchBasePath, params) + subset := new(batchesList) + resp, err = s.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + batches = append(batches, subset.Batches...) 
+ return batches, resp, err +} + +// Create function to create batch of device instances +func (s *BatchServiceOp) Create(projectID string, request *BatchCreateRequest) ([]Batch, *Response, error) { + path := fmt.Sprintf("%s/%s/devices/batch", projectBasePath, projectID) + + batches := new(batchesList) + resp, err := s.client.DoRequest("POST", path, request, batches) + + if err != nil { + return nil, resp, err + } + + return batches.Batches, resp, err +} + +// Delete function to remove an instance batch +func (s *BatchServiceOp) Delete(id string, removeDevices bool) (*Response, error) { + path := fmt.Sprintf("%s/%s?remove_associated_instances=%t", batchBasePath, id, removeDevices) + + return s.client.DoRequest("DELETE", path, nil, nil) +} diff --git a/vendor/github.com/packethost/packngo/bgp_configs.go b/vendor/github.com/packethost/packngo/bgp_configs.go new file mode 100644 index 00000000000..aa21c556138 --- /dev/null +++ b/vendor/github.com/packethost/packngo/bgp_configs.go @@ -0,0 +1,81 @@ +package packngo + +import "fmt" + +var bgpConfigBasePath = "/bgp-config" + +// BGPConfigService interface defines available BGP config methods +type BGPConfigService interface { + Get(projectID string, getOpt *GetOptions) (*BGPConfig, *Response, error) + Create(string, CreateBGPConfigRequest) (*Response, error) + // Delete(configID string) (resp *Response, err error) TODO: Not in Packet API +} + +// BGPConfigServiceOp implements BgpConfigService +type BGPConfigServiceOp struct { + client *Client +} + +// CreateBGPConfigRequest struct +type CreateBGPConfigRequest struct { + DeploymentType string `json:"deployment_type,omitempty"` + Asn int `json:"asn,omitempty"` + Md5 string `json:"md5,omitempty"` + UseCase string `json:"use_case,omitempty"` +} + +// BGPConfig represents a Packet BGP Config +type BGPConfig struct { + ID string `json:"id,omitempty"` + Status string `json:"status,omitempty"` + DeploymentType string `json:"deployment_type,omitempty"` + Asn int `json:"asn,omitempty"` + RouteObject string `json:"route_object,omitempty"` + Md5 string `json:"md5,omitempty"` + MaxPrefix int `json:"max_prefix,omitempty"` + Project Project `json:"project,omitempty"` + CreatedAt Timestamp `json:"created_at,omitempty"` + RequestedAt Timestamp `json:"requested_at,omitempty"` + Sessions []BGPSession `json:"sessions,omitempty"` + Href string `json:"href,omitempty"` +} + +// Create function +func (s *BGPConfigServiceOp) Create(projectID string, request CreateBGPConfigRequest) (*Response, error) { + path := fmt.Sprintf("%s/%s%ss", projectBasePath, projectID, bgpConfigBasePath) + + resp, err := s.client.DoRequest("POST", path, request, nil) + if err != nil { + return resp, err + } + + return resp, err +} + +// Get function +func (s *BGPConfigServiceOp) Get(projectID string, getOpt *GetOptions) (bgpConfig *BGPConfig, resp *Response, err error) { + params := createGetOptionsURL(getOpt) + + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, bgpConfigBasePath, params) + + subset := new(BGPConfig) + + resp, err = s.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + return subset, resp, err +} + +// Delete function TODO: this is not implemented in the Packet API +// func (s *BGPConfigServiceOp) Delete(configID string) (resp *Response, err error) { +// path := fmt.Sprintf("%ss/%s", bgpConfigBasePath, configID) + +// resp, err = s.client.DoRequest("DELETE", path, nil, nil) +// if err != nil { +// return resp, err +// } + +// return resp, err +// } diff --git 
a/vendor/github.com/packethost/packngo/bgp_sessions.go b/vendor/github.com/packethost/packngo/bgp_sessions.go new file mode 100644 index 00000000000..5562488fe2b --- /dev/null +++ b/vendor/github.com/packethost/packngo/bgp_sessions.go @@ -0,0 +1,72 @@ +package packngo + +import "fmt" + +var bgpSessionBasePath = "/bgp/sessions" + +// BGPSessionService interface defines available BGP session methods +type BGPSessionService interface { + Get(string, *GetOptions) (*BGPSession, *Response, error) + Create(string, CreateBGPSessionRequest) (*BGPSession, *Response, error) + Delete(string) (*Response, error) +} + +type bgpSessionsRoot struct { + Sessions []BGPSession `json:"bgp_sessions"` + Meta meta `json:"meta"` +} + +// BGPSessionServiceOp implements BgpSessionService +type BGPSessionServiceOp struct { + client *Client +} + +// BGPSession represents a Packet BGP Session +type BGPSession struct { + ID string `json:"id,omitempty"` + Status string `json:"status,omitempty"` + LearnedRoutes []string `json:"learned_routes,omitempty"` + AddressFamily string `json:"address_family,omitempty"` + Device Device `json:"device,omitempty"` + Href string `json:"href,omitempty"` + DefaultRoute *bool `json:"default_route,omitempty"` +} + +// CreateBGPSessionRequest struct +type CreateBGPSessionRequest struct { + AddressFamily string `json:"address_family"` + DefaultRoute *bool `json:"default_route,omitempty"` +} + +// Create function +func (s *BGPSessionServiceOp) Create(deviceID string, request CreateBGPSessionRequest) (*BGPSession, *Response, error) { + path := fmt.Sprintf("%s/%s%s", deviceBasePath, deviceID, bgpSessionBasePath) + session := new(BGPSession) + + resp, err := s.client.DoRequest("POST", path, request, session) + if err != nil { + return nil, resp, err + } + + return session, resp, err +} + +// Delete function +func (s *BGPSessionServiceOp) Delete(id string) (*Response, error) { + path := fmt.Sprintf("%s/%s", bgpSessionBasePath, id) + + return s.client.DoRequest("DELETE", path, nil, nil) +} + +// Get function +func (s *BGPSessionServiceOp) Get(id string, getOpt *GetOptions) (session *BGPSession, response *Response, err error) { + params := createGetOptionsURL(getOpt) + path := fmt.Sprintf("%s/%s?%s", bgpSessionBasePath, id, params) + session = new(BGPSession) + response, err = s.client.DoRequest("GET", path, nil, session) + if err != nil { + return nil, response, err + } + + return session, response, err +} diff --git a/vendor/github.com/packethost/packngo/billing_address.go b/vendor/github.com/packethost/packngo/billing_address.go new file mode 100644 index 00000000000..93255b32290 --- /dev/null +++ b/vendor/github.com/packethost/packngo/billing_address.go @@ -0,0 +1,7 @@ +package packngo + +type BillingAddress struct { + StreetAddress string `json:"street_address,omitempty"` + PostalCode string `json:"postal_code,omitempty"` + CountryCode string `json:"country_code_alpha2,omitempty"` +} diff --git a/vendor/github.com/packethost/packngo/capacities.go b/vendor/github.com/packethost/packngo/capacities.go new file mode 100644 index 00000000000..fa51413be66 --- /dev/null +++ b/vendor/github.com/packethost/packngo/capacities.go @@ -0,0 +1,79 @@ +package packngo + +const capacityBasePath = "/capacity" + +// CapacityService interface defines available capacity methods +type CapacityService interface { + List() (*CapacityReport, *Response, error) + Check(*CapacityInput) (*CapacityInput, *Response, error) +} + +// CapacityInput struct +type CapacityInput struct { + Servers []ServerInfo 
`json:"servers,omitempty"` +} + +// ServerInfo struct +type ServerInfo struct { + Facility string `json:"facility,omitempty"` + Plan string `json:"plan,omitempty"` + Quantity int `json:"quantity,omitempty"` + Available bool `json:"available,omitempty"` +} + +type capacityRoot struct { + Capacity CapacityReport `json:"capacity,omitempty"` +} + +// CapacityReport map +type CapacityReport map[string]map[string]CapacityPerBaremetal + +// // CapacityPerFacility struct +// type CapacityPerFacility struct { +// T1SmallX86 *CapacityPerBaremetal `json:"t1.small.x86,omitempty"` +// C1SmallX86 *CapacityPerBaremetal `json:"c1.small.x86,omitempty"` +// M1XlargeX86 *CapacityPerBaremetal `json:"m1.xlarge.x86,omitempty"` +// C1XlargeX86 *CapacityPerBaremetal `json:"c1.xlarge.x86,omitempty"` + +// Baremetal0 *CapacityPerBaremetal `json:"baremetal_0,omitempty"` +// Baremetal1 *CapacityPerBaremetal `json:"baremetal_1,omitempty"` +// Baremetal1e *CapacityPerBaremetal `json:"baremetal_1e,omitempty"` +// Baremetal2 *CapacityPerBaremetal `json:"baremetal_2,omitempty"` +// Baremetal2a *CapacityPerBaremetal `json:"baremetal_2a,omitempty"` +// Baremetal2a2 *CapacityPerBaremetal `json:"baremetal_2a2,omitempty"` +// Baremetal3 *CapacityPerBaremetal `json:"baremetal_3,omitempty"` +// } + +// CapacityPerBaremetal struct +type CapacityPerBaremetal struct { + Level string `json:"level,omitempty"` +} + +// CapacityList struct +type CapacityList struct { + Capacity CapacityReport `json:"capacity,omitempty"` +} + +// CapacityServiceOp implements CapacityService +type CapacityServiceOp struct { + client *Client +} + +// List returns a list of facilities and plans with their current capacity. +func (s *CapacityServiceOp) List() (*CapacityReport, *Response, error) { + root := new(capacityRoot) + + resp, err := s.client.DoRequest("GET", capacityBasePath, nil, root) + if err != nil { + return nil, resp, err + } + + return &root.Capacity, nil, nil +} + +// Check validates if a deploy can be fulfilled. 
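+//
+// A usage sketch (facility and plan slugs are examples, and client is assumed
+// to be a *Client with its Capacity service wired up; the API is expected to
+// echo the request back with the Available flag set on each ServerInfo):
+//
+//	in := &CapacityInput{Servers: []ServerInfo{
+//		{Facility: "ewr1", Plan: "baremetal_0", Quantity: 1},
+//	}}
+//	out, _, err := client.Capacity.Check(in)
+//	// out.Servers[0].Available then reports whether the deploy can be fulfilled.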
+func (s *CapacityServiceOp) Check(input *CapacityInput) (cap *CapacityInput, resp *Response, err error) { + cap = new(CapacityInput) + resp, err = s.client.DoRequest("POST", capacityBasePath, input, cap) + return cap, resp, err +} diff --git a/vendor/github.com/packethost/packngo/connect.go b/vendor/github.com/packethost/packngo/connect.go new file mode 100644 index 00000000000..28e72d1f496 --- /dev/null +++ b/vendor/github.com/packethost/packngo/connect.go @@ -0,0 +1,148 @@ +package packngo + +import "fmt" + +const ( + connectBasePath = "/packet-connect/connections" + AzureProviderID = "ed5de8e0-77a9-4d3b-9de0-65281d3aa831" +) + +type ConnectService interface { + List(string, *ListOptions) ([]Connect, *Response, error) + Get(string, string, *GetOptions) (*Connect, *Response, error) + Delete(string, string) (*Response, error) + Create(*ConnectCreateRequest) (*Connect, *Response, error) + Provision(string, string) (*Connect, *Response, error) + Deprovision(string, string, bool) (*Connect, *Response, error) +} + +type ConnectCreateRequest struct { + Name string `json:"name"` + ProjectID string `json:"project_id"` + ProviderID string `json:"provider_id"` + ProviderPayload string `json:"provider_payload"` + Facility string `json:"facility"` + PortSpeed int `json:"port_speed"` + VLAN int `json:"vlan"` + Tags []string `json:"tags,omitempty"` + Description string `json:"description,omitempty"` +} + +type Connect struct { + ID string `json:"id"` + Status string `json:"status"` + Name string `json:"name"` + ProjectID string `json:"project_id"` + ProviderID string `json:"provider_id"` + ProviderPayload string `json:"provider_payload"` + Facility string `json:"facility"` + PortSpeed int `json:"port_speed"` + VLAN int `json:"vlan"` + Description string `json:"description,omitempty"` +} + +type ConnectServiceOp struct { + client *Client +} + +type connectsRoot struct { + Connects []Connect `json:"connections"` + Meta meta `json:"meta"` +} + +func (c *ConnectServiceOp) List(projectID string, listOpt *ListOptions) (connects []Connect, resp *Response, err error) { + params := createListOptionsURL(listOpt) + + project_param := fmt.Sprintf("project_id=%s", projectID) + if params == "" { + params = project_param + } else { + params = fmt.Sprintf("%s&%s", params, project_param) + } + path := fmt.Sprintf("%s/?%s", connectBasePath, params) + + for { + subset := new(connectsRoot) + + resp, err = c.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + connects = append(connects, subset.Connects...) 
+ + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + + return + } +} + +func (c *ConnectServiceOp) Deprovision(connectID, projectID string, delete bool) (*Connect, *Response, error) { + params := fmt.Sprintf("project_id=%s&delete=%t", projectID, delete) + path := fmt.Sprintf("%s/%s/deprovision?%s", connectBasePath, connectID, params) + connect := new(Connect) + + resp, err := c.client.DoRequest("POST", path, nil, connect) + if err != nil { + return nil, resp, err + } + + return connect, resp, err +} + +func (c *ConnectServiceOp) Provision(connectID, projectID string) (*Connect, *Response, error) { + params := fmt.Sprintf("project_id=%s", projectID) + path := fmt.Sprintf("%s/%s/provision?%s", connectBasePath, connectID, params) + connect := new(Connect) + + resp, err := c.client.DoRequest("POST", path, nil, connect) + if err != nil { + return nil, resp, err + } + + return connect, resp, err +} + +func (c *ConnectServiceOp) Create(createRequest *ConnectCreateRequest) (*Connect, *Response, error) { + url := fmt.Sprintf("%s", connectBasePath) + connect := new(Connect) + + resp, err := c.client.DoRequest("POST", url, createRequest, connect) + if err != nil { + return nil, resp, err + } + + return connect, resp, err +} + +func (c *ConnectServiceOp) Get(connectID, projectID string, getOpt *GetOptions) (*Connect, *Response, error) { + params := createGetOptionsURL(getOpt) + project_param := fmt.Sprintf("project_id=%s", projectID) + if params == "" { + params = project_param + } else { + params = fmt.Sprintf("%s&%s", params, project_param) + } + path := fmt.Sprintf("%s/%s?%s", connectBasePath, connectID, params) + connect := new(Connect) + + resp, err := c.client.DoRequest("GET", path, nil, connect) + if err != nil { + return nil, resp, err + } + + return connect, resp, err +} + +func (c *ConnectServiceOp) Delete(connectID, projectID string) (*Response, error) { + path := fmt.Sprintf("%s/%s?project_id=%s", connectBasePath, connectID, + projectID) + + return c.client.DoRequest("DELETE", path, nil, nil) +} diff --git a/vendor/github.com/packethost/packngo/devices.go b/vendor/github.com/packethost/packngo/devices.go new file mode 100644 index 00000000000..239efa59d49 --- /dev/null +++ b/vendor/github.com/packethost/packngo/devices.go @@ -0,0 +1,344 @@ +package packngo + +import ( + "encoding/json" + "fmt" +) + +const deviceBasePath = "/devices" + +// DeviceService interface defines available device methods +type DeviceService interface { + List(ProjectID string, listOpt *ListOptions) ([]Device, *Response, error) + Get(DeviceID string, getOpt *GetOptions) (*Device, *Response, error) + Create(*DeviceCreateRequest) (*Device, *Response, error) + Update(string, *DeviceUpdateRequest) (*Device, *Response, error) + Delete(string) (*Response, error) + Reboot(string) (*Response, error) + PowerOff(string) (*Response, error) + PowerOn(string) (*Response, error) + Lock(string) (*Response, error) + Unlock(string) (*Response, error) + ListBGPSessions(deviceID string, listOpt *ListOptions) ([]BGPSession, *Response, error) + ListEvents(string, *ListOptions) ([]Event, *Response, error) +} + +type devicesRoot struct { + Devices []Device `json:"devices"` + Meta meta `json:"meta"` +} + +// DeviceRaw represents a Packet device from API +type DeviceRaw struct { + ID string `json:"id"` + Href string `json:"href,omitempty"` + Hostname string `json:"hostname,omitempty"` + State string 
`json:"state,omitempty"` + Created string `json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + Locked bool `json:"locked,omitempty"` + BillingCycle string `json:"billing_cycle,omitempty"` + Storage map[string]interface{} `json:"storage,omitempty"` + Tags []string `json:"tags,omitempty"` + Network []*IPAddressAssignment `json:"ip_addresses"` + Volumes []*Volume `json:"volumes"` + OS *OS `json:"operating_system,omitempty"` + Plan *Plan `json:"plan,omitempty"` + Facility *Facility `json:"facility,omitempty"` + Project *Project `json:"project,omitempty"` + ProvisionEvents []*Event `json:"provisioning_events,omitempty"` + ProvisionPer float32 `json:"provisioning_percentage,omitempty"` + UserData string `json:"userdata,omitempty"` + RootPassword string `json:"root_password,omitempty"` + IPXEScriptURL string `json:"ipxe_script_url,omitempty"` + AlwaysPXE bool `json:"always_pxe,omitempty"` + HardwareReservation Href `json:"hardware_reservation,omitempty"` + SpotInstance bool `json:"spot_instance,omitempty"` + SpotPriceMax float64 `json:"spot_price_max,omitempty"` + TerminationTime *Timestamp `json:"termination_time,omitempty"` + NetworkPorts []Port `json:"network_ports,omitempty"` + CustomData map[string]interface{} `json:"customdata,omitempty"` + SSHKeys []SSHKey `json:"ssh_keys,omitempty"` + ShortID string `json:"short_id,omitempty"` +} + +type Device struct { + DeviceRaw + NetworkType string +} + +func (d *Device) UnmarshalJSON(b []byte) error { + dJSON := DeviceRaw{} + if err := json.Unmarshal(b, &dJSON); err != nil { + return err + } + d.DeviceRaw = dJSON + if len(dJSON.NetworkPorts) > 0 { + networkType, err := dJSON.GetNetworkType() + if err != nil { + return err + } + d.NetworkType = networkType + } + return nil +} + +type NetworkInfo struct { + PublicIPv4 string + PublicIPv6 string + PrivateIPv4 string +} + +func (d *Device) GetNetworkInfo() NetworkInfo { + ni := NetworkInfo{} + for _, ip := range d.Network { + // Initial device IPs are fixed and marked as "Management" + if ip.Management { + if ip.AddressFamily == 4 { + if ip.Public { + ni.PublicIPv4 = ip.Address + } else { + ni.PrivateIPv4 = ip.Address + } + } else { + ni.PublicIPv6 = ip.Address + } + } + } + return ni +} + +func (d Device) String() string { + return Stringify(d) +} + +func (d DeviceRaw) GetNetworkType() (string, error) { + if len(d.NetworkPorts) == 0 { + return "", fmt.Errorf("Device has no network ports listed") + } + for _, p := range d.NetworkPorts { + if p.Name == "bond0" { + return p.NetworkType, nil + } + } + return "", fmt.Errorf("Bound port not found") +} + +type IPAddressCreateRequest struct { + AddressFamily int `json:"address_family"` + Public bool `json:"public"` +} + +// DeviceCreateRequest type used to create a Packet device +type DeviceCreateRequest struct { + Hostname string `json:"hostname"` + Plan string `json:"plan"` + Facility []string `json:"facility"` + OS string `json:"operating_system"` + BillingCycle string `json:"billing_cycle"` + ProjectID string `json:"project_id"` + UserData string `json:"userdata"` + Storage string `json:"storage,omitempty"` + Tags []string `json:"tags"` + IPXEScriptURL string `json:"ipxe_script_url,omitempty"` + PublicIPv4SubnetSize int `json:"public_ipv4_subnet_size,omitempty"` + AlwaysPXE bool `json:"always_pxe,omitempty"` + HardwareReservationID string `json:"hardware_reservation_id,omitempty"` + SpotInstance bool `json:"spot_instance,omitempty"` + SpotPriceMax float64 `json:"spot_price_max,omitempty,string"` + TerminationTime *Timestamp 
`json:"termination_time,omitempty"` + CustomData string `json:"customdata,omitempty"` + // UserSSHKeys is a list of user UUIDs - essentialy a list of + // collaborators. The users must be a collaborator in the same project + // where the device is created. The user's SSH keys then go to the + // device. + UserSSHKeys []string `json:"user_ssh_keys,omitempty"` + // Project SSHKeys is a list of SSHKeys resource UUIDs. If this param + // is supplied, only the listed SSHKeys will go to the device. + // Any other Project SSHKeys and any User SSHKeys will not be present + // in the device. + ProjectSSHKeys []string `json:"project_ssh_keys,omitempty"` + Features map[string]string `json:"features,omitempty"` + IPAddresses []IPAddressCreateRequest `json:"ip_addresses,omitempty"` +} + +// DeviceUpdateRequest type used to update a Packet device +type DeviceUpdateRequest struct { + Hostname *string `json:"hostname,omitempty"` + Description *string `json:"description,omitempty"` + UserData *string `json:"userdata,omitempty"` + Locked *bool `json:"locked,omitempty"` + Tags *[]string `json:"tags,omitempty"` + AlwaysPXE *bool `json:"always_pxe,omitempty"` + IPXEScriptURL *string `json:"ipxe_script_url,omitempty"` + CustomData *string `json:"customdata,omitempty"` +} + +func (d DeviceCreateRequest) String() string { + return Stringify(d) +} + +// DeviceActionRequest type used to execute actions on devices +type DeviceActionRequest struct { + Type string `json:"type"` +} + +func (d DeviceActionRequest) String() string { + return Stringify(d) +} + +// DeviceServiceOp implements DeviceService +type DeviceServiceOp struct { + client *Client +} + +// List returns devices on a project +func (s *DeviceServiceOp) List(projectID string, listOpt *ListOptions) (devices []Device, resp *Response, err error) { + listOpt = makeSureListOptionsInclude(listOpt, "facility") + params := createListOptionsURL(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, deviceBasePath, params) + + for { + subset := new(devicesRoot) + + resp, err = s.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + devices = append(devices, subset.Devices...) 
+ + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + + return + } +} + +// Get returns a device by id +func (s *DeviceServiceOp) Get(deviceID string, getOpt *GetOptions) (*Device, *Response, error) { + getOpt = makeSureGetOptionsInclude(getOpt, "facility") + params := createGetOptionsURL(getOpt) + + path := fmt.Sprintf("%s/%s?%s", deviceBasePath, deviceID, params) + device := new(Device) + resp, err := s.client.DoRequest("GET", path, nil, device) + if err != nil { + return nil, resp, err + } + return device, resp, err +} + +// Create creates a new device +func (s *DeviceServiceOp) Create(createRequest *DeviceCreateRequest) (*Device, *Response, error) { + path := fmt.Sprintf("%s/%s%s", projectBasePath, createRequest.ProjectID, deviceBasePath) + device := new(Device) + + resp, err := s.client.DoRequest("POST", path, createRequest, device) + if err != nil { + return nil, resp, err + } + return device, resp, err +} + +// Update updates an existing device +func (s *DeviceServiceOp) Update(deviceID string, updateRequest *DeviceUpdateRequest) (*Device, *Response, error) { + path := fmt.Sprintf("%s/%s?include=facility", deviceBasePath, deviceID) + device := new(Device) + + resp, err := s.client.DoRequest("PUT", path, updateRequest, device) + if err != nil { + return nil, resp, err + } + + return device, resp, err +} + +// Delete deletes a device +func (s *DeviceServiceOp) Delete(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", deviceBasePath, deviceID) + + return s.client.DoRequest("DELETE", path, nil, nil) +} + +// Reboot reboots on a device +func (s *DeviceServiceOp) Reboot(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s/actions", deviceBasePath, deviceID) + action := &DeviceActionRequest{Type: "reboot"} + + return s.client.DoRequest("POST", path, action, nil) +} + +// PowerOff powers on a device +func (s *DeviceServiceOp) PowerOff(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s/actions", deviceBasePath, deviceID) + action := &DeviceActionRequest{Type: "power_off"} + + return s.client.DoRequest("POST", path, action, nil) +} + +// PowerOn powers on a device +func (s *DeviceServiceOp) PowerOn(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s/actions", deviceBasePath, deviceID) + action := &DeviceActionRequest{Type: "power_on"} + + return s.client.DoRequest("POST", path, action, nil) +} + +type lockType struct { + Locked bool `json:"locked"` +} + +// Lock sets a device to "locked" +func (s *DeviceServiceOp) Lock(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", deviceBasePath, deviceID) + action := lockType{Locked: true} + + return s.client.DoRequest("PATCH", path, action, nil) +} + +// Unlock sets a device to "unlocked" +func (s *DeviceServiceOp) Unlock(deviceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", deviceBasePath, deviceID) + action := lockType{Locked: false} + + return s.client.DoRequest("PATCH", path, action, nil) +} + +// ListBGPSessions returns all BGP Sessions associated with the device +func (s *DeviceServiceOp) ListBGPSessions(deviceID string, listOpt *ListOptions) (bgpSessions []BGPSession, resp *Response, err error) { + params := createListOptionsURL(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", deviceBasePath, deviceID, bgpSessionBasePath, params) + + for { + subset := new(bgpSessionsRoot) + + resp, err = s.client.DoRequest("GET", path, 
nil, subset) + if err != nil { + return nil, resp, err + } + + bgpSessions = append(bgpSessions, subset.Sessions...) + + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + return + } +} + +// ListEvents returns list of device events +func (s *DeviceServiceOp) ListEvents(deviceID string, listOpt *ListOptions) ([]Event, *Response, error) { + path := fmt.Sprintf("%s/%s%s", deviceBasePath, deviceID, eventBasePath) + + return listEvents(s.client, path, listOpt) +} diff --git a/vendor/github.com/packethost/packngo/email.go b/vendor/github.com/packethost/packngo/email.go new file mode 100644 index 00000000000..acce8999b51 --- /dev/null +++ b/vendor/github.com/packethost/packngo/email.go @@ -0,0 +1,87 @@ +package packngo + +import "fmt" + +const emailBasePath = "/emails" + +// EmailRequest type used to add an email address to the current user +type EmailRequest struct { + Address string `json:"address,omitempty"` + Default *bool `json:"default,omitempty"` +} + +// EmailService interface defines available email methods +type EmailService interface { + Get(string, *GetOptions) (*Email, *Response, error) + Create(*EmailRequest) (*Email, *Response, error) + Update(string, *EmailRequest) (*Email, *Response, error) + Delete(string) (*Response, error) +} + +// Email represents a user's email address +type Email struct { + ID string `json:"id"` + Address string `json:"address"` + Default bool `json:"default,omitempty"` + URL string `json:"href,omitempty"` +} + +func (e Email) String() string { + return Stringify(e) +} + +// EmailServiceOp implements EmailService +type EmailServiceOp struct { + client *Client +} + +// Get retrieves an email by id +func (s *EmailServiceOp) Get(emailID string, getOpt *GetOptions) (*Email, *Response, error) { + params := createGetOptionsURL(getOpt) + path := fmt.Sprintf("%s/%s?%s", emailBasePath, emailID, params) + email := new(Email) + + resp, err := s.client.DoRequest("GET", path, nil, email) + if err != nil { + return nil, resp, err + } + + return email, resp, err +} + +// Create adds a new email address to the current user. 
+func (s *EmailServiceOp) Create(request *EmailRequest) (*Email, *Response, error) { + email := new(Email) + + resp, err := s.client.DoRequest("POST", emailBasePath, request, email) + if err != nil { + return nil, resp, err + } + + return email, resp, err +} + +// Delete removes the email addres from the current user account +func (s *EmailServiceOp) Delete(emailID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", emailBasePath, emailID) + + resp, err := s.client.DoRequest("DELETE", path, nil, nil) + if err != nil { + return resp, err + } + + return resp, err +} + +// Update email parameters +func (s *EmailServiceOp) Update(emailID string, request *EmailRequest) (*Email, *Response, error) { + email := new(Email) + path := fmt.Sprintf("%s/%s", emailBasePath, emailID) + + resp, err := s.client.DoRequest("PUT", path, request, email) + if err != nil { + return nil, resp, err + } + + return email, resp, err +} diff --git a/vendor/github.com/packethost/packngo/events.go b/vendor/github.com/packethost/packngo/events.go new file mode 100644 index 00000000000..78ec9b7f5ba --- /dev/null +++ b/vendor/github.com/packethost/packngo/events.go @@ -0,0 +1,104 @@ +package packngo + +import "fmt" + +const eventBasePath = "/events" + +// Event struct +type Event struct { + ID string `json:"id,omitempty"` + State string `json:"state,omitempty"` + Type string `json:"type,omitempty"` + Body string `json:"body,omitempty"` + Relationships []Href `json:"relationships,omitempty"` + Interpolated string `json:"interpolated,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + Href string `json:"href,omitempty"` +} + +type eventsRoot struct { + Events []Event `json:"events,omitempty"` + Meta meta `json:"meta,omitempty"` +} + +// EventService interface defines available event functions +type EventService interface { + List(*ListOptions) ([]Event, *Response, error) + Get(string, *GetOptions) (*Event, *Response, error) +} + +// EventServiceOp implements EventService +type EventServiceOp struct { + client *Client +} + +// List returns all events +func (s *EventServiceOp) List(listOpt *ListOptions) ([]Event, *Response, error) { + return listEvents(s.client, eventBasePath, listOpt) +} + +// Get returns an event by ID +func (s *EventServiceOp) Get(eventID string, getOpt *GetOptions) (*Event, *Response, error) { + path := fmt.Sprintf("%s/%s", eventBasePath, eventID) + return get(s.client, path, getOpt) +} + +// list helper function for all event functions +func listEvents(client *Client, path string, listOpt *ListOptions) (events []Event, resp *Response, err error) { + params := createListOptionsURL(listOpt) + path = fmt.Sprintf("%s?%s", path, params) + + for { + subset := new(eventsRoot) + + resp, err = client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + events = append(events, subset.Events...) 
+ + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + + return + } + +} + +// list helper function for all event functions +/* +func listEvents(client *Client, path string, listOpt *ListOptions) ([]Event, *Response, error) { + params := createListOptionsURL(listOpt) + root := new(eventsRoot) + + path = fmt.Sprintf("%s?%s", path, params) + + resp, err := client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + return root.Events, resp, err +} +*/ + +func get(client *Client, path string, getOpt *GetOptions) (*Event, *Response, error) { + params := createGetOptionsURL(getOpt) + + event := new(Event) + + path = fmt.Sprintf("%s?%s", path, params) + + resp, err := client.DoRequest("GET", path, nil, event) + if err != nil { + return nil, resp, err + } + + return event, resp, err +} diff --git a/vendor/github.com/packethost/packngo/facilities.go b/vendor/github.com/packethost/packngo/facilities.go new file mode 100644 index 00000000000..fd4a7bf913c --- /dev/null +++ b/vendor/github.com/packethost/packngo/facilities.go @@ -0,0 +1,56 @@ +package packngo + +import "fmt" + +const facilityBasePath = "/facilities" + +// FacilityService interface defines available facility methods +type FacilityService interface { + List(*ListOptions) ([]Facility, *Response, error) +} + +type facilityRoot struct { + Facilities []Facility `json:"facilities"` +} + +// Facility represents a Packet facility +type Facility struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Code string `json:"code,omitempty"` + Features []string `json:"features,omitempty"` + Address *Address `json:"address,omitempty"` + URL string `json:"href,omitempty"` +} + +func (f Facility) String() string { + return Stringify(f) +} + +// Address - the physical address of the facility +type Address struct { + ID string `json:"id,omitempty"` +} + +func (a Address) String() string { + return Stringify(a) +} + +// FacilityServiceOp implements FacilityService +type FacilityServiceOp struct { + client *Client +} + +// List returns all facilities +func (s *FacilityServiceOp) List(listOpt *ListOptions) ([]Facility, *Response, error) { + root := new(facilityRoot) + params := createListOptionsURL(listOpt) + path := fmt.Sprintf("%s?%s", facilityBasePath, params) + + resp, err := s.client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + return root.Facilities, resp, err +} diff --git a/vendor/github.com/packethost/packngo/go.mod b/vendor/github.com/packethost/packngo/go.mod new file mode 100644 index 00000000000..10ef6856dde --- /dev/null +++ b/vendor/github.com/packethost/packngo/go.mod @@ -0,0 +1,7 @@ +module github.com/packethost/packngo + +require ( + github.com/stretchr/testify v1.3.0 + golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 + golang.org/x/sys v0.0.0-20190209173611-3b5209105503 // indirect +) diff --git a/vendor/github.com/packethost/packngo/go.sum b/vendor/github.com/packethost/packngo/go.sum new file mode 100644 index 00000000000..ec31c502166 --- /dev/null +++ b/vendor/github.com/packethost/packngo/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/packethost/packngo/hardware_reservations.go b/vendor/github.com/packethost/packngo/hardware_reservations.go new file mode 100644 index 00000000000..826a28257ba --- /dev/null +++ b/vendor/github.com/packethost/packngo/hardware_reservations.go @@ -0,0 +1,99 @@ +package packngo + +import "fmt" + +const hardwareReservationBasePath = "/hardware-reservations" + +// HardwareReservationService interface defines available hardware reservation functions +type HardwareReservationService interface { + Get(hardwareReservationID string, getOpt *GetOptions) (*HardwareReservation, *Response, error) + List(projectID string, listOpt *ListOptions) ([]HardwareReservation, *Response, error) + Move(string, string) (*HardwareReservation, *Response, error) +} + +// HardwareReservationServiceOp implements HardwareReservationService +type HardwareReservationServiceOp struct { + client *Client +} + +// HardwareReservation struct +type HardwareReservation struct { + ID string `json:"id,omitempty"` + ShortID string `json:"short_id,omitempty"` + Facility Facility `json:"facility,omitempty"` + Plan Plan `json:"plan,omitempty"` + Provisionable bool `json:"provisionable,omitempty"` + Spare bool `json:"spare,omitempty"` + SwitchUUID string `json:"switch_uuid,omitempty"` + Intervals int `json:"intervals,omitempty"` + CurrentPeriod int `json:"current_period,omitempty"` + Href string `json:"href,omitempty"` + Project Project `json:"project,omitempty"` + Device *Device `json:"device,omitempty"` + CreatedAt Timestamp `json:"created_at,omitempty"` +} + +type hardwareReservationRoot struct { + HardwareReservations []HardwareReservation `json:"hardware_reservations"` + Meta meta `json:"meta"` +} + +// List returns all hardware reservations for a given project +func (s *HardwareReservationServiceOp) List(projectID string, listOpt *ListOptions) (reservations []HardwareReservation, resp *Response, err error) { + root := new(hardwareReservationRoot) + params := createListOptionsURL(listOpt) + + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, hardwareReservationBasePath, params) + + for { + subset := new(hardwareReservationRoot) + + resp, err = s.client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + reservations = append(reservations, root.HardwareReservations...) 
+ + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + + return + } +} + +// Get returns a single hardware reservation +func (s *HardwareReservationServiceOp) Get(hardwareReservationdID string, getOpt *GetOptions) (*HardwareReservation, *Response, error) { + params := createGetOptionsURL(getOpt) + + hardwareReservation := new(HardwareReservation) + + path := fmt.Sprintf("%s/%s?%s", hardwareReservationBasePath, hardwareReservationdID, params) + + resp, err := s.client.DoRequest("GET", path, nil, hardwareReservation) + if err != nil { + return nil, resp, err + } + + return hardwareReservation, resp, err +} + +// Move a hardware reservation to another project +func (s *HardwareReservationServiceOp) Move(hardwareReservationdID, projectID string) (*HardwareReservation, *Response, error) { + hardwareReservation := new(HardwareReservation) + path := fmt.Sprintf("%s/%s/%s", hardwareReservationBasePath, hardwareReservationdID, "move") + body := map[string]string{} + body["project_id"] = projectID + + resp, err := s.client.DoRequest("POST", path, body, hardwareReservation) + if err != nil { + return nil, resp, err + } + + return hardwareReservation, resp, err +} diff --git a/vendor/github.com/packethost/packngo/ip.go b/vendor/github.com/packethost/packngo/ip.go new file mode 100644 index 00000000000..edb3addb302 --- /dev/null +++ b/vendor/github.com/packethost/packngo/ip.go @@ -0,0 +1,198 @@ +package packngo + +import ( + "fmt" +) + +const ipBasePath = "/ips" + +// DeviceIPService handles assignment of addresses from reserved blocks to instances in a project. +type DeviceIPService interface { + Assign(deviceID string, assignRequest *AddressStruct) (*IPAddressAssignment, *Response, error) + Unassign(assignmentID string) (*Response, error) + Get(assignmentID string, getOpt *GetOptions) (*IPAddressAssignment, *Response, error) +} + +// ProjectIPService handles reservation of IP address blocks for a project. +type ProjectIPService interface { + Get(reservationID string, getOpt *GetOptions) (*IPAddressReservation, *Response, error) + List(projectID string) ([]IPAddressReservation, *Response, error) + Request(projectID string, ipReservationReq *IPReservationRequest) (*IPAddressReservation, *Response, error) + Remove(ipReservationID string) (*Response, error) + AvailableAddresses(ipReservationID string, r *AvailableRequest) ([]string, *Response, error) +} + +type IpAddressCommon struct { + ID string `json:"id"` + Address string `json:"address"` + Gateway string `json:"gateway"` + Network string `json:"network"` + AddressFamily int `json:"address_family"` + Netmask string `json:"netmask"` + Public bool `json:"public"` + CIDR int `json:"cidr"` + Created string `json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + Href string `json:"href"` + Management bool `json:"management"` + Manageable bool `json:"manageable"` + Project Href `json:"project"` + Global *bool `json:"global_ip"` +} + +// IPAddressReservation is created when user sends IP reservation request for a project (considering it's within quota). 
+type IPAddressReservation struct { + IpAddressCommon + Assignments []Href `json:"assignments"` + Facility *Facility `json:"facility,omitempty"` + Available string `json:"available"` + Addon bool `json:"addon"` + Bill bool `json:"bill"` + Description *string `json:"details"` +} + +// AvailableResponse is a type for listing of available addresses from a reserved block. +type AvailableResponse struct { + Available []string `json:"available"` +} + +// AvailableRequest is a type for listing available addresses from a reserved block. +type AvailableRequest struct { + CIDR int `json:"cidr"` +} + +// IPAddressAssignment is created when an IP address from reservation block is assigned to a device. +type IPAddressAssignment struct { + IpAddressCommon + AssignedTo Href `json:"assigned_to"` +} + +// IPReservationRequest represents the body of a reservation request. +type IPReservationRequest struct { + Type string `json:"type"` + Quantity int `json:"quantity"` + Description string `json:"details,omitempty"` + Facility *string `json:"facility,omitempty"` +} + +// AddressStruct is a helper type for request/response with dict like {"address": ... } +type AddressStruct struct { + Address string `json:"address"` +} + +func deleteFromIP(client *Client, resourceID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", ipBasePath, resourceID) + + return client.DoRequest("DELETE", path, nil, nil) +} + +func (i IPAddressReservation) String() string { + return Stringify(i) +} + +func (i IPAddressAssignment) String() string { + return Stringify(i) +} + +// DeviceIPServiceOp is interface for IP-address assignment methods. +type DeviceIPServiceOp struct { + client *Client +} + +// Unassign unassigns an IP address from the device to which it is currently assigned. +// This will remove the relationship between an IP and the device and will make the IP +// address available to be assigned to another device. +func (i *DeviceIPServiceOp) Unassign(assignmentID string) (*Response, error) { + return deleteFromIP(i.client, assignmentID) +} + +// Assign assigns an IP address to a device. +// The IP address must be in one of the IP ranges assigned to the device’s project. +func (i *DeviceIPServiceOp) Assign(deviceID string, assignRequest *AddressStruct) (*IPAddressAssignment, *Response, error) { + path := fmt.Sprintf("%s/%s%s", deviceBasePath, deviceID, ipBasePath) + ipa := new(IPAddressAssignment) + + resp, err := i.client.DoRequest("POST", path, assignRequest, ipa) + if err != nil { + return nil, resp, err + } + + return ipa, resp, err +} + +// Get returns assignment by ID. +func (i *DeviceIPServiceOp) Get(assignmentID string, getOpt *GetOptions) (*IPAddressAssignment, *Response, error) { + params := createGetOptionsURL(getOpt) + path := fmt.Sprintf("%s/%s?%s", ipBasePath, assignmentID, params) + ipa := new(IPAddressAssignment) + + resp, err := i.client.DoRequest("GET", path, nil, ipa) + if err != nil { + return nil, resp, err + } + + return ipa, resp, err +} + +// ProjectIPServiceOp is interface for IP assignment methods. +type ProjectIPServiceOp struct { + client *Client +} + +// Get returns reservation by ID. 
+func (i *ProjectIPServiceOp) Get(reservationID string, getOpt *GetOptions) (*IPAddressReservation, *Response, error) { + params := createGetOptionsURL(getOpt) + path := fmt.Sprintf("%s/%s?%s", ipBasePath, reservationID, params) + ipr := new(IPAddressReservation) + + resp, err := i.client.DoRequest("GET", path, nil, ipr) + if err != nil { + return nil, resp, err + } + + return ipr, resp, err +} + +// List provides a list of IP resevations for a single project. +func (i *ProjectIPServiceOp) List(projectID string) ([]IPAddressReservation, *Response, error) { + path := fmt.Sprintf("%s/%s%s", projectBasePath, projectID, ipBasePath) + reservations := new(struct { + Reservations []IPAddressReservation `json:"ip_addresses"` + }) + + resp, err := i.client.DoRequest("GET", path, nil, reservations) + if err != nil { + return nil, resp, err + } + return reservations.Reservations, resp, nil +} + +// Request requests more IP space for a project in order to have additional IP addresses to assign to devices. +func (i *ProjectIPServiceOp) Request(projectID string, ipReservationReq *IPReservationRequest) (*IPAddressReservation, *Response, error) { + path := fmt.Sprintf("%s/%s%s", projectBasePath, projectID, ipBasePath) + ipr := new(IPAddressReservation) + + resp, err := i.client.DoRequest("POST", path, ipReservationReq, ipr) + if err != nil { + return nil, resp, err + } + return ipr, resp, err +} + +// Remove removes an IP reservation from the project. +func (i *ProjectIPServiceOp) Remove(ipReservationID string) (*Response, error) { + return deleteFromIP(i.client, ipReservationID) +} + +// AvailableAddresses lists addresses available from a reserved block +func (i *ProjectIPServiceOp) AvailableAddresses(ipReservationID string, r *AvailableRequest) ([]string, *Response, error) { + path := fmt.Sprintf("%s/%s/available?cidr=%d", ipBasePath, ipReservationID, r.CIDR) + ar := new(AvailableResponse) + + resp, err := i.client.DoRequest("GET", path, r, ar) + if err != nil { + return nil, resp, err + } + return ar.Available, resp, nil + +} diff --git a/vendor/github.com/packethost/packngo/notifications.go b/vendor/github.com/packethost/packngo/notifications.go new file mode 100644 index 00000000000..051ca56f9c6 --- /dev/null +++ b/vendor/github.com/packethost/packngo/notifications.go @@ -0,0 +1,95 @@ +package packngo + +import "fmt" + +const notificationBasePath = "/notifications" + +// Notification struct +type Notification struct { + ID string `json:"id,omitempty"` + Type string `json:"type,omitempty"` + Body string `json:"body,omitempty"` + Severity string `json:"severity,omitempty"` + Read bool `json:"read,omitempty"` + Context string `json:"context,omitempty"` + CreatedAt Timestamp `json:"created_at,omitempty"` + UpdatedAt Timestamp `json:"updated_at,omitempty"` + User Href `json:"user,omitempty"` + Href string `json:"href,omitempty"` +} + +type notificationsRoot struct { + Notifications []Notification `json:"notifications,omitempty"` + Meta meta `json:"meta,omitempty"` +} + +// NotificationService interface defines available event functions +type NotificationService interface { + List(*ListOptions) ([]Notification, *Response, error) + Get(string, *GetOptions) (*Notification, *Response, error) + MarkAsRead(string) (*Notification, *Response, error) +} + +// NotificationServiceOp implements NotificationService +type NotificationServiceOp struct { + client *Client +} + +// List returns all notifications +func (s *NotificationServiceOp) List(listOpt *ListOptions) ([]Notification, *Response, error) { + return 
listNotifications(s.client, notificationBasePath, listOpt) +} + +// Get returns a notification by ID +func (s *NotificationServiceOp) Get(notificationID string, getOpt *GetOptions) (*Notification, *Response, error) { + params := createGetOptionsURL(getOpt) + + path := fmt.Sprintf("%s/%s?%s", notificationBasePath, notificationID, params) + return getNotifications(s.client, path) +} + +// Marks notification as read by ID +func (s *NotificationServiceOp) MarkAsRead(notificationID string) (*Notification, *Response, error) { + path := fmt.Sprintf("%s/%s", notificationBasePath, notificationID) + return markAsRead(s.client, path) +} + +// list helper function for all notification functions +func listNotifications(client *Client, path string, listOpt *ListOptions) ([]Notification, *Response, error) { + params := createListOptionsURL(listOpt) + + root := new(notificationsRoot) + + path = fmt.Sprintf("%s?%s", path, params) + + resp, err := client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + return root.Notifications, resp, err +} + +func getNotifications(client *Client, path string) (*Notification, *Response, error) { + + notification := new(Notification) + + resp, err := client.DoRequest("GET", path, nil, notification) + if err != nil { + return nil, resp, err + } + + return notification, resp, err +} + +func markAsRead(client *Client, path string) (*Notification, *Response, error) { + + notification := new(Notification) + + resp, err := client.DoRequest("PUT", path, nil, notification) + if err != nil { + return nil, resp, err + } + + return notification, resp, err +} diff --git a/vendor/github.com/packethost/packngo/operatingsystems.go b/vendor/github.com/packethost/packngo/operatingsystems.go new file mode 100644 index 00000000000..12aa1bb5b0d --- /dev/null +++ b/vendor/github.com/packethost/packngo/operatingsystems.go @@ -0,0 +1,42 @@ +package packngo + +const osBasePath = "/operating-systems" + +// OSService interface defines available operating_systems methods +type OSService interface { + List() ([]OS, *Response, error) +} + +type osRoot struct { + OperatingSystems []OS `json:"operating_systems"` +} + +// OS represents a Packet operating system +type OS struct { + Name string `json:"name"` + Slug string `json:"slug"` + Distro string `json:"distro"` + Version string `json:"version"` + ProvisionableOn []string `json:"provisionable_on"` +} + +func (o OS) String() string { + return Stringify(o) +} + +// OSServiceOp implements OSService +type OSServiceOp struct { + client *Client +} + +// List returns all available operating systems +func (s *OSServiceOp) List() ([]OS, *Response, error) { + root := new(osRoot) + + resp, err := s.client.DoRequest("GET", osBasePath, nil, root) + if err != nil { + return nil, resp, err + } + + return root.OperatingSystems, resp, err +} diff --git a/vendor/github.com/packethost/packngo/organizations.go b/vendor/github.com/packethost/packngo/organizations.go new file mode 100644 index 00000000000..03b59c6017d --- /dev/null +++ b/vendor/github.com/packethost/packngo/organizations.go @@ -0,0 +1,171 @@ +package packngo + +import "fmt" + +// API documentation https://www.packet.net/developers/api/organizations/ +const organizationBasePath = "/organizations" + +// OrganizationService interface defines available organization methods +type OrganizationService interface { + List(*ListOptions) ([]Organization, *Response, error) + Get(string, *GetOptions) (*Organization, *Response, error) + Create(*OrganizationCreateRequest) (*Organization, 
*Response, error) + Update(string, *OrganizationUpdateRequest) (*Organization, *Response, error) + Delete(string) (*Response, error) + ListPaymentMethods(string) ([]PaymentMethod, *Response, error) + ListEvents(string, *ListOptions) ([]Event, *Response, error) +} + +type organizationsRoot struct { + Organizations []Organization `json:"organizations"` + Meta meta `json:"meta"` +} + +// Organization represents a Packet organization +type Organization struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Website string `json:"website,omitempty"` + Twitter string `json:"twitter,omitempty"` + Created string `json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + Address Address `json:"address,omitempty"` + TaxID string `json:"tax_id,omitempty"` + MainPhone string `json:"main_phone,omitempty"` + BillingPhone string `json:"billing_phone,omitempty"` + CreditAmount float64 `json:"credit_amount,omitempty"` + Logo string `json:"logo,omitempty"` + LogoThumb string `json:"logo_thumb,omitempty"` + Projects []Project `json:"projects,omitempty"` + URL string `json:"href,omitempty"` + Users []User `json:"members,omitempty"` + Owners []User `json:"owners,omitempty"` +} + +func (o Organization) String() string { + return Stringify(o) +} + +// OrganizationCreateRequest type used to create a Packet organization +type OrganizationCreateRequest struct { + Name string `json:"name"` + Description string `json:"description"` + Website string `json:"website"` + Twitter string `json:"twitter"` + Logo string `json:"logo"` +} + +func (o OrganizationCreateRequest) String() string { + return Stringify(o) +} + +// OrganizationUpdateRequest type used to update a Packet organization +type OrganizationUpdateRequest struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Website *string `json:"website,omitempty"` + Twitter *string `json:"twitter,omitempty"` + Logo *string `json:"logo,omitempty"` +} + +func (o OrganizationUpdateRequest) String() string { + return Stringify(o) +} + +// OrganizationServiceOp implements OrganizationService +type OrganizationServiceOp struct { + client *Client +} + +// List returns the user's organizations +func (s *OrganizationServiceOp) List(listOpt *ListOptions) (orgs []Organization, resp *Response, err error) { + params := createListOptionsURL(listOpt) + root := new(organizationsRoot) + + path := fmt.Sprintf("%s?%s", organizationBasePath, params) + + for { + resp, err = s.client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + orgs = append(orgs, root.Organizations...) 
+ + if root.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = root.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + return + } +} + +// Get returns a organization by id +func (s *OrganizationServiceOp) Get(organizationID string, getOpt *GetOptions) (*Organization, *Response, error) { + params := createGetOptionsURL(getOpt) + path := fmt.Sprintf("%s/%s?%s", organizationBasePath, organizationID, params) + organization := new(Organization) + + resp, err := s.client.DoRequest("GET", path, nil, organization) + if err != nil { + return nil, resp, err + } + + return organization, resp, err +} + +// Create creates a new organization +func (s *OrganizationServiceOp) Create(createRequest *OrganizationCreateRequest) (*Organization, *Response, error) { + organization := new(Organization) + + resp, err := s.client.DoRequest("POST", organizationBasePath, createRequest, organization) + if err != nil { + return nil, resp, err + } + + return organization, resp, err +} + +// Update updates an organization +func (s *OrganizationServiceOp) Update(id string, updateRequest *OrganizationUpdateRequest) (*Organization, *Response, error) { + path := fmt.Sprintf("%s/%s", organizationBasePath, id) + organization := new(Organization) + + resp, err := s.client.DoRequest("PATCH", path, updateRequest, organization) + if err != nil { + return nil, resp, err + } + + return organization, resp, err +} + +// Delete deletes an organizationID +func (s *OrganizationServiceOp) Delete(organizationID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", organizationBasePath, organizationID) + + return s.client.DoRequest("DELETE", path, nil, nil) +} + +// ListPaymentMethods returns PaymentMethods for an organization +func (s *OrganizationServiceOp) ListPaymentMethods(organizationID string) ([]PaymentMethod, *Response, error) { + url := fmt.Sprintf("%s/%s%s", organizationBasePath, organizationID, paymentMethodBasePath) + root := new(paymentMethodsRoot) + + resp, err := s.client.DoRequest("GET", url, nil, root) + if err != nil { + return nil, resp, err + } + + return root.PaymentMethods, resp, err +} + +// ListEvents returns list of organization events +func (s *OrganizationServiceOp) ListEvents(organizationID string, listOpt *ListOptions) ([]Event, *Response, error) { + path := fmt.Sprintf("%s/%s%s", organizationBasePath, organizationID, eventBasePath) + + return listEvents(s.client, path, listOpt) +} diff --git a/vendor/github.com/packethost/packngo/packngo.go b/vendor/github.com/packethost/packngo/packngo.go new file mode 100644 index 00000000000..47b89a29fa4 --- /dev/null +++ b/vendor/github.com/packethost/packngo/packngo.go @@ -0,0 +1,394 @@ +package packngo + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/http/httputil" + "net/url" + "os" + "strconv" + "strings" + "time" +) + +const ( + packetTokenEnvVar = "PACKET_AUTH_TOKEN" + libraryVersion = "0.1.0" + baseURL = "https://api.packet.net/" + userAgent = "packngo/" + libraryVersion + mediaType = "application/json" + debugEnvVar = "PACKNGO_DEBUG" + + headerRateLimit = "X-RateLimit-Limit" + headerRateRemaining = "X-RateLimit-Remaining" + headerRateReset = "X-RateLimit-Reset" +) + +type GetOptions struct { + Includes []string + Excludes []string +} + +// ListOptions specifies optional global API parameters +type ListOptions struct { + // for paginated result sets, page of results to retrieve + Page int `url:"page,omitempty"` + // for paginated result sets, the 
number of results to return per page + PerPage int `url:"per_page,omitempty"` + Includes []string + Excludes []string +} + +func makeSureGetOptionsInclude(g *GetOptions, s string) *GetOptions { + if g == nil { + return &GetOptions{Includes: []string{s}} + } + if !contains(g.Includes, s) { + g.Includes = append(g.Includes, s) + } + return g +} + +func makeSureListOptionsInclude(l *ListOptions, s string) *ListOptions { + if l == nil { + return &ListOptions{Includes: []string{s}} + } + if !contains(l.Includes, s) { + l.Includes = append(l.Includes, s) + } + return l +} + +func createGetOptionsURL(g *GetOptions) (url string) { + if g == nil { + return "" + } + if len(g.Includes) != 0 { + url += fmt.Sprintf("include=%s", strings.Join(g.Includes, ",")) + } + if len(g.Excludes) != 0 { + if url != "" { + url += "&" + } + url += fmt.Sprintf("exclude=%s", strings.Join(g.Excludes, ",")) + } + return + +} + +func createListOptionsURL(l *ListOptions) (url string) { + if l == nil { + return "" + } + if len(l.Includes) != 0 { + url += fmt.Sprintf("include=%s", strings.Join(l.Includes, ",")) + } + if len(l.Excludes) != 0 { + if url != "" { + url += "&" + } + url += fmt.Sprintf("exclude=%s", strings.Join(l.Excludes, ",")) + } + if l.Page != 0 { + if url != "" { + url += "&" + } + url += fmt.Sprintf("page=%d", l.Page) + } + + if l.PerPage != 0 { + if url != "" { + url += "&" + } + url += fmt.Sprintf("per_page=%d", l.PerPage) + } + + return +} + +// meta contains pagination information +type meta struct { + Self *Href `json:"self"` + First *Href `json:"first"` + Last *Href `json:"last"` + Previous *Href `json:"previous,omitempty"` + Next *Href `json:"next,omitempty"` + Total int `json:"total"` + CurrentPageNum int `json:"current_page"` + LastPageNum int `json:"last_page"` +} + +// Response is the http response from api calls +type Response struct { + *http.Response + Rate +} + +// Href is an API link +type Href struct { + Href string `json:"href"` +} + +func (r *Response) populateRate() { + // parse the rate limit headers and populate Response.Rate + if limit := r.Header.Get(headerRateLimit); limit != "" { + r.Rate.RequestLimit, _ = strconv.Atoi(limit) + } + if remaining := r.Header.Get(headerRateRemaining); remaining != "" { + r.Rate.RequestsRemaining, _ = strconv.Atoi(remaining) + } + if reset := r.Header.Get(headerRateReset); reset != "" { + if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 { + r.Rate.Reset = Timestamp{time.Unix(v, 0)} + } + } +} + +// ErrorResponse is the http response used on errors +type ErrorResponse struct { + Response *http.Response + Errors []string `json:"errors"` + SingleError string `json:"error"` +} + +func (r *ErrorResponse) Error() string { + return fmt.Sprintf("%v %v: %d %v %v", + r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, strings.Join(r.Errors, ", "), r.SingleError) +} + +// Client is the base API Client +type Client struct { + client *http.Client + debug bool + + BaseURL *url.URL + + UserAgent string + ConsumerToken string + APIKey string + + RateLimit Rate + + // Packet Api Objects + Plans PlanService + Users UserService + Emails EmailService + SSHKeys SSHKeyService + Devices DeviceService + Projects ProjectService + Facilities FacilityService + OperatingSystems OSService + DeviceIPs DeviceIPService + DevicePorts DevicePortService + ProjectIPs ProjectIPService + ProjectVirtualNetworks ProjectVirtualNetworkService + Volumes VolumeService + VolumeAttachments VolumeAttachmentService + SpotMarket SpotMarketService + SpotMarketRequests 
SpotMarketRequestService + Organizations OrganizationService + BGPSessions BGPSessionService + BGPConfig BGPConfigService + CapacityService CapacityService + Batches BatchService + TwoFactorAuth TwoFactorAuthService + VPN VPNService + HardwareReservations HardwareReservationService + Events EventService + Notifications NotificationService + Connects ConnectService +} + +// NewRequest inits a new http request with the proper headers +func (c *Client) NewRequest(method, path string, body interface{}) (*http.Request, error) { + // relative path to append to the endpoint url, no leading slash please + rel, err := url.Parse(path) + if err != nil { + return nil, err + } + + u := c.BaseURL.ResolveReference(rel) + + // json encode the request body, if any + buf := new(bytes.Buffer) + if body != nil { + err := json.NewEncoder(buf).Encode(body) + if err != nil { + return nil, err + } + } + + req, err := http.NewRequest(method, u.String(), buf) + if err != nil { + return nil, err + } + + req.Close = true + + req.Header.Add("X-Auth-Token", c.APIKey) + req.Header.Add("X-Consumer-Token", c.ConsumerToken) + + req.Header.Add("Content-Type", mediaType) + req.Header.Add("Accept", mediaType) + req.Header.Add("User-Agent", userAgent) + return req, nil +} + +// Do executes the http request +func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) { + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + response := Response{Response: resp} + response.populateRate() + if c.debug { + o, _ := httputil.DumpResponse(response.Response, true) + log.Printf("\n=======[RESPONSE]============\n%s\n\n", string(o)) + } + c.RateLimit = response.Rate + + err = checkResponse(resp) + // if the response is an error, return the ErrorResponse + if err != nil { + return &response, err + } + + if v != nil { + // if v implements the io.Writer interface, return the raw response + if w, ok := v.(io.Writer); ok { + io.Copy(w, resp.Body) + } else { + err = json.NewDecoder(resp.Body).Decode(v) + if err != nil { + return &response, err + } + } + } + + return &response, err +} + +// DoRequest is a convenience method, it calls NewRequest followed by Do +// v is the interface to unmarshal the response JSON into +func (c *Client) DoRequest(method, path string, body, v interface{}) (*Response, error) { + req, err := c.NewRequest(method, path, body) + if c.debug { + o, _ := httputil.DumpRequestOut(req, true) + log.Printf("\n=======[REQUEST]=============\n%s\n", string(o)) + } + if err != nil { + return nil, err + } + return c.Do(req, v) +} + +// DoRequestWithHeader same as DoRequest +func (c *Client) DoRequestWithHeader(method string, headers map[string]string, path string, body, v interface{}) (*Response, error) { + req, err := c.NewRequest(method, path, body) + for k, v := range headers { + req.Header.Add(k, v) + } + + if c.debug { + o, _ := httputil.DumpRequestOut(req, true) + log.Printf("\n=======[REQUEST]=============\n%s\n", string(o)) + } + if err != nil { + return nil, err + } + return c.Do(req, v) +} + +// NewClient initializes and returns a Client +func NewClient() (*Client, error) { + apiToken := os.Getenv(packetTokenEnvVar) + if apiToken == "" { + return nil, fmt.Errorf("you must export %s", packetTokenEnvVar) + } + c := NewClientWithAuth("packngo lib", apiToken, nil) + return c, nil + +} + +// NewClientWithAuth initializes and returns a Client, use this to get an API Client to operate on +// N.B.: Packet's API certificate requires Go 1.5+ to successfully parse. 
If you are using +// an older version of Go, pass in a custom http.Client with a custom TLS configuration +// that sets "InsecureSkipVerify" to "true" +func NewClientWithAuth(consumerToken string, apiKey string, httpClient *http.Client) *Client { + client, _ := NewClientWithBaseURL(consumerToken, apiKey, httpClient, baseURL) + return client +} + +// NewClientWithBaseURL returns a Client pointing to nonstandard API URL, e.g. +// for mocking the remote API +func NewClientWithBaseURL(consumerToken string, apiKey string, httpClient *http.Client, apiBaseURL string) (*Client, error) { + if httpClient == nil { + // Don't fall back on http.DefaultClient as it's not nice to adjust state + // implicitly. If the client wants to use http.DefaultClient, they can + // pass it in explicitly. + httpClient = &http.Client{} + } + + u, err := url.Parse(apiBaseURL) + if err != nil { + return nil, err + } + + c := &Client{client: httpClient, BaseURL: u, UserAgent: userAgent, ConsumerToken: consumerToken, APIKey: apiKey} + c.debug = os.Getenv(debugEnvVar) != "" + c.Plans = &PlanServiceOp{client: c} + c.Organizations = &OrganizationServiceOp{client: c} + c.Users = &UserServiceOp{client: c} + c.Emails = &EmailServiceOp{client: c} + c.SSHKeys = &SSHKeyServiceOp{client: c} + c.Devices = &DeviceServiceOp{client: c} + c.Projects = &ProjectServiceOp{client: c} + c.Facilities = &FacilityServiceOp{client: c} + c.OperatingSystems = &OSServiceOp{client: c} + c.DeviceIPs = &DeviceIPServiceOp{client: c} + c.DevicePorts = &DevicePortServiceOp{client: c} + c.ProjectVirtualNetworks = &ProjectVirtualNetworkServiceOp{client: c} + c.ProjectIPs = &ProjectIPServiceOp{client: c} + c.Volumes = &VolumeServiceOp{client: c} + c.VolumeAttachments = &VolumeAttachmentServiceOp{client: c} + c.SpotMarket = &SpotMarketServiceOp{client: c} + c.BGPSessions = &BGPSessionServiceOp{client: c} + c.BGPConfig = &BGPConfigServiceOp{client: c} + c.CapacityService = &CapacityServiceOp{client: c} + c.Batches = &BatchServiceOp{client: c} + c.TwoFactorAuth = &TwoFactorAuthServiceOp{client: c} + c.VPN = &VPNServiceOp{client: c} + c.HardwareReservations = &HardwareReservationServiceOp{client: c} + c.SpotMarketRequests = &SpotMarketRequestServiceOp{client: c} + c.Events = &EventServiceOp{client: c} + c.Notifications = &NotificationServiceOp{client: c} + c.Connects = &ConnectServiceOp{client: c} + + return c, nil +} + +func checkResponse(r *http.Response) error { + // return if http status code is within 200 range + if c := r.StatusCode; c >= 200 && c <= 299 { + // response is good, return + return nil + } + + errorResponse := &ErrorResponse{Response: r} + data, err := ioutil.ReadAll(r.Body) + // if the response has a body, populate the message in errorResponse + if err == nil && len(data) > 0 { + json.Unmarshal(data, errorResponse) + } + + return errorResponse +} diff --git a/vendor/github.com/packethost/packngo/payment_methods.go b/vendor/github.com/packethost/packngo/payment_methods.go new file mode 100644 index 00000000000..0dc98fa0c91 --- /dev/null +++ b/vendor/github.com/packethost/packngo/payment_methods.go @@ -0,0 +1,72 @@ +package packngo + +// API documentation https://www.packet.net/developers/api/paymentmethods/ +const paymentMethodBasePath = "/payment-methods" + +// ProjectService interface defines available project methods +type PaymentMethodService interface { + List() ([]PaymentMethod, *Response, error) + Get(string) (*PaymentMethod, *Response, error) + Create(*PaymentMethodCreateRequest) (*PaymentMethod, *Response, error) + Update(string, 
*PaymentMethodUpdateRequest) (*PaymentMethod, *Response, error) + Delete(string) (*Response, error) +} + +type paymentMethodsRoot struct { + PaymentMethods []PaymentMethod `json:"payment_methods"` +} + +// PaymentMethod represents a Packet payment method of an organization +type PaymentMethod struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Created string `json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + Nonce string `json:"nonce,omitempty"` + Default bool `json:"default,omitempty"` + Organization Organization `json:"organization,omitempty"` + Projects []Project `json:"projects,omitempty"` + Type string `json:"type,omitempty"` + CardholderName string `json:"cardholder_name,omitempty"` + ExpMonth string `json:"expiration_month,omitempty"` + ExpYear string `json:"expiration_year,omitempty"` + Last4 string `json:"last_4,omitempty"` + BillingAddress BillingAddress `json:"billing_address,omitempty"` + URL string `json:"href,omitempty"` +} + +func (pm PaymentMethod) String() string { + return Stringify(pm) +} + +// PaymentMethodCreateRequest type used to create a Packet payment method of an organization +type PaymentMethodCreateRequest struct { + Name string `json:"name"` + Nonce string `json:"nonce"` + CardholderName string `json:"cardholder_name,omitempty"` + ExpMonth string `json:"expiration_month,omitempty"` + ExpYear string `json:"expiration_year,omitempty"` + BillingAddress string `json:"billing_address,omitempty"` +} + +func (pm PaymentMethodCreateRequest) String() string { + return Stringify(pm) +} + +// PaymentMethodUpdateRequest type used to update a Packet payment method of an organization +type PaymentMethodUpdateRequest struct { + Name *string `json:"name,omitempty"` + CardholderName *string `json:"cardholder_name,omitempty"` + ExpMonth *string `json:"expiration_month,omitempty"` + ExpYear *string `json:"expiration_year,omitempty"` + BillingAddress *string `json:"billing_address,omitempty"` +} + +func (pm PaymentMethodUpdateRequest) String() string { + return Stringify(pm) +} + +// PaymentMethodServiceOp implements PaymentMethodService +type PaymentMethodServiceOp struct { + client *Client +} diff --git a/vendor/github.com/packethost/packngo/plans.go b/vendor/github.com/packethost/packngo/plans.go new file mode 100644 index 00000000000..36b0a2ff24b --- /dev/null +++ b/vendor/github.com/packethost/packngo/plans.go @@ -0,0 +1,126 @@ +package packngo + +import ( + "fmt" +) + +const planBasePath = "/plans" + +// PlanService interface defines available plan methods +type PlanService interface { + List(*ListOptions) ([]Plan, *Response, error) +} + +type planRoot struct { + Plans []Plan `json:"plans"` +} + +// Plan represents a Packet service plan +type Plan struct { + ID string `json:"id"` + Slug string `json:"slug,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Line string `json:"line,omitempty"` + Specs *Specs `json:"specs,omitempty"` + Pricing *Pricing `json:"pricing,omitempty"` + DeploymentTypes []string `json:"deployment_types"` + Class string `json:"class"` + AvailableIn []Facility `json:"available_in"` +} + +func (p Plan) String() string { + return Stringify(p) +} + +// Specs - the server specs for a plan +type Specs struct { + Cpus []*Cpus `json:"cpus,omitempty"` + Memory *Memory `json:"memory,omitempty"` + Drives []*Drives `json:"drives,omitempty"` + Nics []*Nics `json:"nics,omitempty"` + Features *Features `json:"features,omitempty"` +} + +func (s Specs) String() string { 
+ return Stringify(s) +} + +// Cpus - the CPU config details for specs on a plan +type Cpus struct { + Count int `json:"count,omitempty"` + Type string `json:"type,omitempty"` +} + +func (c Cpus) String() string { + return Stringify(c) +} + +// Memory - the RAM config details for specs on a plan +type Memory struct { + Total string `json:"total,omitempty"` +} + +func (m Memory) String() string { + return Stringify(m) +} + +// Drives - the storage config details for specs on a plan +type Drives struct { + Count int `json:"count,omitempty"` + Size string `json:"size,omitempty"` + Type string `json:"type,omitempty"` +} + +func (d Drives) String() string { + return Stringify(d) +} + +// Nics - the network hardware details for specs on a plan +type Nics struct { + Count int `json:"count,omitempty"` + Type string `json:"type,omitempty"` +} + +func (n Nics) String() string { + return Stringify(n) +} + +// Features - other features in the specs for a plan +type Features struct { + Raid bool `json:"raid,omitempty"` + Txt bool `json:"txt,omitempty"` +} + +func (f Features) String() string { + return Stringify(f) +} + +// Pricing - the pricing options on a plan +type Pricing struct { + Hour float32 `json:"hour,omitempty"` + Month float32 `json:"month,omitempty"` +} + +func (p Pricing) String() string { + return Stringify(p) +} + +// PlanServiceOp implements PlanService +type PlanServiceOp struct { + client *Client +} + +// List method returns all available plans +func (s *PlanServiceOp) List(listOpt *ListOptions) ([]Plan, *Response, error) { + root := new(planRoot) + params := createListOptionsURL(listOpt) + path := fmt.Sprintf("%s?%s", planBasePath, params) + + resp, err := s.client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + return root.Plans, resp, err +} diff --git a/vendor/github.com/packethost/packngo/ports.go b/vendor/github.com/packethost/packngo/ports.go new file mode 100644 index 00000000000..5c0bab89819 --- /dev/null +++ b/vendor/github.com/packethost/packngo/ports.go @@ -0,0 +1,321 @@ +package packngo + +import ( + "fmt" +) + +const portBasePath = "/ports" + +// DevicePortService handles operations on a port which belongs to a particular device +type DevicePortService interface { + Assign(*PortAssignRequest) (*Port, *Response, error) + Unassign(*PortAssignRequest) (*Port, *Response, error) + AssignNative(*PortAssignRequest) (*Port, *Response, error) + UnassignNative(string) (*Port, *Response, error) + Bond(*BondRequest) (*Port, *Response, error) + Disbond(*DisbondRequest) (*Port, *Response, error) + DeviceToNetworkType(string, string) (*Device, error) + DeviceNetworkType(string) (string, error) + PortToLayerTwo(string) (*Port, *Response, error) + PortToLayerThree(string) (*Port, *Response, error) + DeviceToLayerTwo(string) (*Device, error) + DeviceToLayerThree(string) (*Device, error) + GetBondPort(string) (*Port, error) + GetPortByName(string, string) (*Port, error) +} + +type PortData struct { + MAC string `json:"mac"` + Bonded bool `json:"bonded"` +} + +type Port struct { + ID string `json:"id"` + Type string `json:"type"` + Name string `json:"name"` + Data PortData `json:"data"` + NetworkType string `json:"network_type,omitempty"` + NativeVirtualNetwork *VirtualNetwork `json:"native_virtual_network"` + AttachedVirtualNetworks []VirtualNetwork `json:"virtual_networks"` +} + +type AddressRequest struct { + AddressFamily int `json:"address_family"` + Public bool `json:"public"` +} + +type BackToL3Request struct { + RequestIPs []AddressRequest 
`json:"request_ips"` +} + +type DevicePortServiceOp struct { + client *Client +} + +type PortAssignRequest struct { + PortID string `json:"id"` + VirtualNetworkID string `json:"vnid"` +} + +type BondRequest struct { + PortID string `json:"id"` + BulkEnable bool `json:"bulk_enable"` +} + +type DisbondRequest struct { + PortID string `json:"id"` + BulkDisable bool `json:"bulk_disable"` +} + +func (i *DevicePortServiceOp) GetBondPort(deviceID string) (*Port, error) { + device, _, err := i.client.Devices.Get(deviceID, nil) + if err != nil { + return nil, err + } + for _, port := range device.NetworkPorts { + if port.Type == "NetworkBondPort" { + return &port, nil + } + } + + return nil, fmt.Errorf("No bonded port found in device %s", deviceID) +} + +func (i *DevicePortServiceOp) GetPortByName(deviceID, name string) (*Port, error) { + device, _, err := i.client.Devices.Get(deviceID, nil) + if err != nil { + return nil, err + } + for _, port := range device.NetworkPorts { + if port.Name == name { + return &port, nil + } + } + + return nil, fmt.Errorf("Port %s not found in device %s", name, deviceID) +} + +func (i *DevicePortServiceOp) Assign(par *PortAssignRequest) (*Port, *Response, error) { + path := fmt.Sprintf("%s/%s/assign", portBasePath, par.PortID) + return i.portAction(path, par) +} + +func (i *DevicePortServiceOp) AssignNative(par *PortAssignRequest) (*Port, *Response, error) { + path := fmt.Sprintf("%s/%s/native-vlan", portBasePath, par.PortID) + return i.portAction(path, par) +} + +func (i *DevicePortServiceOp) UnassignNative(portID string) (*Port, *Response, error) { + path := fmt.Sprintf("%s/%s/native-vlan", portBasePath, portID) + port := new(Port) + + resp, err := i.client.DoRequest("DELETE", path, nil, port) + if err != nil { + return nil, resp, err + } + + return port, resp, err +} + +func (i *DevicePortServiceOp) Unassign(par *PortAssignRequest) (*Port, *Response, error) { + path := fmt.Sprintf("%s/%s/unassign", portBasePath, par.PortID) + return i.portAction(path, par) +} + +func (i *DevicePortServiceOp) Bond(br *BondRequest) (*Port, *Response, error) { + path := fmt.Sprintf("%s/%s/bond", portBasePath, br.PortID) + return i.portAction(path, br) +} + +func (i *DevicePortServiceOp) Disbond(dr *DisbondRequest) (*Port, *Response, error) { + path := fmt.Sprintf("%s/%s/disbond", portBasePath, dr.PortID) + return i.portAction(path, dr) +} + +func (i *DevicePortServiceOp) portAction(path string, req interface{}) (*Port, *Response, error) { + port := new(Port) + + resp, err := i.client.DoRequest("POST", path, req, port) + if err != nil { + return nil, resp, err + } + + return port, resp, err +} + +func (i *DevicePortServiceOp) PortToLayerTwo(portID string) (*Port, *Response, error) { + path := fmt.Sprintf("%s/%s/convert/layer-2", portBasePath, portID) + port := new(Port) + + resp, err := i.client.DoRequest("POST", path, nil, port) + if err != nil { + return nil, resp, err + } + + return port, resp, err +} + +func (i *DevicePortServiceOp) PortToLayerThree(portID string) (*Port, *Response, error) { + path := fmt.Sprintf("%s/%s/convert/layer-3", portBasePath, portID) + port := new(Port) + + req := BackToL3Request{ + RequestIPs: []AddressRequest{ + AddressRequest{AddressFamily: 4, Public: true}, + AddressRequest{AddressFamily: 4, Public: false}, + AddressRequest{AddressFamily: 6, Public: true}, + }, + } + + resp, err := i.client.DoRequest("POST", path, &req, port) + if err != nil { + return nil, resp, err + } + + return port, resp, err +} + +func (i *DevicePortServiceOp) 
DeviceNetworkType(deviceID string) (string, error) { + d, _, err := i.client.Devices.Get(deviceID, nil) + if err != nil { + return "", err + } + return d.NetworkType, nil +} + +func (i *DevicePortServiceOp) DeviceToNetworkType(deviceID string, nType string) (*Device, error) { + + d, _, err := i.client.Devices.Get(deviceID, nil) + if err != nil { + return nil, err + } + + curType := d.NetworkType + + if curType == nType { + return nil, fmt.Errorf("Device already is in state %s", nType) + } + bond0ID := "" + eth1ID := "" + for _, port := range d.NetworkPorts { + if port.Name == "bond0" { + bond0ID = port.ID + } + if port.Name == "eth1" { + eth1ID = port.ID + } + } + + if nType == "layer3" { + if curType == "layer2-individual" || curType == "layer2-bonded" { + if curType == "layer2-individual" { + _, _, err := i.client.DevicePorts.Bond( + &BondRequest{PortID: bond0ID, BulkEnable: false}) + if err != nil { + return nil, err + } + + } + _, _, err := i.client.DevicePorts.PortToLayerThree(bond0ID) + if err != nil { + return nil, err + } + } + _, _, err = i.client.DevicePorts.Bond( + &BondRequest{PortID: bond0ID, BulkEnable: true}) + if err != nil { + return nil, err + } + } + if nType == "hybrid" { + if curType == "layer2-individual" || curType == "layer2-bonded" { + if curType == "layer2-individual" { + _, _, err = i.client.DevicePorts.Bond( + &BondRequest{PortID: bond0ID, BulkEnable: false}) + if err != nil { + return nil, err + } + } + _, _, err = i.client.DevicePorts.PortToLayerThree(bond0ID) + if err != nil { + return nil, err + } + } + _, _, err := i.client.DevicePorts.Disbond( + &DisbondRequest{PortID: eth1ID, BulkDisable: false}) + if err != nil { + return nil, err + } + } + if nType == "layer2-individual" { + if curType == "hybrid" || curType == "layer3" { + _, _, err = i.client.DevicePorts.PortToLayerTwo(bond0ID) + if err != nil { + return nil, err + } + + } + _, _, err = i.client.DevicePorts.Disbond( + &DisbondRequest{PortID: bond0ID, BulkDisable: true}) + if err != nil { + return nil, err + } + } + if nType == "layer2-bonded" { + if curType == "hybrid" || curType == "layer3" { + _, _, err = i.client.DevicePorts.PortToLayerTwo(bond0ID) + if err != nil { + return nil, err + } + } + _, _, err = i.client.DevicePorts.Bond( + &BondRequest{PortID: bond0ID, BulkEnable: false}) + if err != nil { + return nil, err + } + } + + d, _, err = i.client.Devices.Get(deviceID, nil) + if err != nil { + return nil, err + } + + if d.NetworkType != nType { + return nil, fmt.Errorf( + "Failed to convert device %s from %s to %s. New type was %s", + deviceID, curType, nType, d.NetworkType) + + } + return d, err +} + +func (i *DevicePortServiceOp) DeviceToLayerThree(deviceID string) (*Device, error) { + // hopefull all the VLANs are unassigned at this point + bond0, err := i.client.DevicePorts.GetBondPort(deviceID) + if err != nil { + return nil, err + } + + bond0, _, err = i.client.DevicePorts.PortToLayerThree(bond0.ID) + if err != nil { + return nil, err + } + d, _, err := i.client.Devices.Get(deviceID, nil) + return d, err +} + +// DeviceToLayerTwo converts device to L2 networking. Use bond0 to attach VLAN. 
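The intended flow, per the comment above, is to switch the device to layer-2 networking first and then attach the VLAN to bond0. A minimal, self-contained sketch of that sequence (the device and VLAN IDs below are placeholders, and NewClient assumes the API token is already exported in the environment variable the library reads):

    package main

    import (
        "log"

        "github.com/packethost/packngo"
    )

    func main() {
        // Placeholder IDs for illustration only.
        deviceID := "00000000-0000-0000-0000-000000000000"
        vlanID := "11111111-1111-1111-1111-111111111111"

        // NewClient reads the API token from the environment.
        c, err := packngo.NewClient()
        if err != nil {
            log.Fatal(err)
        }

        // Move the device's bond port to layer 2...
        if _, err := c.DevicePorts.DeviceToLayerTwo(deviceID); err != nil {
            log.Fatal(err)
        }

        // ...then assign the VLAN to bond0.
        bond0, err := c.DevicePorts.GetBondPort(deviceID)
        if err != nil {
            log.Fatal(err)
        }
        if _, _, err := c.DevicePorts.Assign(&packngo.PortAssignRequest{
            PortID:           bond0.ID,
            VirtualNetworkID: vlanID,
        }); err != nil {
            log.Fatal(err)
        }
    }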
+func (i *DevicePortServiceOp) DeviceToLayerTwo(deviceID string) (*Device, error) { + bond0, err := i.client.DevicePorts.GetBondPort(deviceID) + if err != nil { + return nil, err + } + + bond0, _, err = i.client.DevicePorts.PortToLayerTwo(bond0.ID) + if err != nil { + return nil, err + } + d, _, err := i.client.Devices.Get(deviceID, nil) + return d, err + +} diff --git a/vendor/github.com/packethost/packngo/projects.go b/vendor/github.com/packethost/packngo/projects.go new file mode 100644 index 00000000000..cb2649eef2b --- /dev/null +++ b/vendor/github.com/packethost/packngo/projects.go @@ -0,0 +1,174 @@ +package packngo + +import ( + "fmt" +) + +const projectBasePath = "/projects" + +// ProjectService interface defines available project methods +type ProjectService interface { + List(listOpt *ListOptions) ([]Project, *Response, error) + Get(string, *GetOptions) (*Project, *Response, error) + Create(*ProjectCreateRequest) (*Project, *Response, error) + Update(string, *ProjectUpdateRequest) (*Project, *Response, error) + Delete(string) (*Response, error) + ListBGPSessions(projectID string, listOpt *ListOptions) ([]BGPSession, *Response, error) + ListEvents(string, *ListOptions) ([]Event, *Response, error) +} + +type projectsRoot struct { + Projects []Project `json:"projects"` + Meta meta `json:"meta"` +} + +// Project represents a Packet project +type Project struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Organization Organization `json:"organization,omitempty"` + Created string `json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + Users []User `json:"members,omitempty"` + Devices []Device `json:"devices,omitempty"` + SSHKeys []SSHKey `json:"ssh_keys,omitempty"` + URL string `json:"href,omitempty"` + PaymentMethod PaymentMethod `json:"payment_method,omitempty"` + BackendTransfer bool `json:"backend_transfer_enabled"` +} + +func (p Project) String() string { + return Stringify(p) +} + +// ProjectCreateRequest type used to create a Packet project +type ProjectCreateRequest struct { + Name string `json:"name"` + PaymentMethodID string `json:"payment_method_id,omitempty"` + OrganizationID string `json:"organization_id,omitempty"` +} + +func (p ProjectCreateRequest) String() string { + return Stringify(p) +} + +// ProjectUpdateRequest type used to update a Packet project +type ProjectUpdateRequest struct { + Name *string `json:"name,omitempty"` + PaymentMethodID *string `json:"payment_method_id,omitempty"` + BackendTransfer *bool `json:"backend_transfer_enabled,omitempty"` +} + +func (p ProjectUpdateRequest) String() string { + return Stringify(p) +} + +// ProjectServiceOp implements ProjectService +type ProjectServiceOp struct { + client *Client +} + +// List returns the user's projects +func (s *ProjectServiceOp) List(listOpt *ListOptions) (projects []Project, resp *Response, err error) { + params := createListOptionsURL(listOpt) + root := new(projectsRoot) + + path := fmt.Sprintf("%s?%s", projectBasePath, params) + + for { + resp, err = s.client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + projects = append(projects, root.Projects...) 
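		// Pagination note: unless the caller pinned a page via ListOptions.Page,
		// this loop follows Meta.Next (re-appending the original query string)
		// until the API stops returning a next page. ListBGPSessions, Users.List
		// and Volumes.List below use the same pattern.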
+ + if root.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = root.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + + return + } +} + +// Get returns a project by id +func (s *ProjectServiceOp) Get(projectID string, getOpt *GetOptions) (*Project, *Response, error) { + params := createGetOptionsURL(getOpt) + path := fmt.Sprintf("%s/%s?%s", projectBasePath, projectID, params) + project := new(Project) + resp, err := s.client.DoRequest("GET", path, nil, project) + if err != nil { + return nil, resp, err + } + return project, resp, err +} + +// Create creates a new project +func (s *ProjectServiceOp) Create(createRequest *ProjectCreateRequest) (*Project, *Response, error) { + project := new(Project) + + resp, err := s.client.DoRequest("POST", projectBasePath, createRequest, project) + if err != nil { + return nil, resp, err + } + + return project, resp, err +} + +// Update updates a project +func (s *ProjectServiceOp) Update(id string, updateRequest *ProjectUpdateRequest) (*Project, *Response, error) { + path := fmt.Sprintf("%s/%s", projectBasePath, id) + project := new(Project) + + resp, err := s.client.DoRequest("PATCH", path, updateRequest, project) + if err != nil { + return nil, resp, err + } + + return project, resp, err +} + +// Delete deletes a project +func (s *ProjectServiceOp) Delete(projectID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", projectBasePath, projectID) + + return s.client.DoRequest("DELETE", path, nil, nil) +} + +// ListBGPSessions returns all BGP Sessions associated with the project +func (s *ProjectServiceOp) ListBGPSessions(projectID string, listOpt *ListOptions) (bgpSessions []BGPSession, resp *Response, err error) { + params := createListOptionsURL(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, bgpSessionBasePath, params) + + for { + subset := new(bgpSessionsRoot) + + resp, err = s.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + bgpSessions = append(bgpSessions, subset.Sessions...) 
+ + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + + return + } +} + +// ListEvents returns list of project events +func (s *ProjectServiceOp) ListEvents(projectID string, listOpt *ListOptions) ([]Event, *Response, error) { + path := fmt.Sprintf("%s/%s%s", projectBasePath, projectID, eventBasePath) + + return listEvents(s.client, path, listOpt) +} diff --git a/vendor/github.com/packethost/packngo/rate.go b/vendor/github.com/packethost/packngo/rate.go new file mode 100644 index 00000000000..965967d4557 --- /dev/null +++ b/vendor/github.com/packethost/packngo/rate.go @@ -0,0 +1,12 @@ +package packngo + +// Rate provides the API request rate limit details +type Rate struct { + RequestLimit int `json:"request_limit"` + RequestsRemaining int `json:"requests_remaining"` + Reset Timestamp `json:"rate_reset"` +} + +func (r Rate) String() string { + return Stringify(r) +} diff --git a/vendor/github.com/packethost/packngo/spotmarket.go b/vendor/github.com/packethost/packngo/spotmarket.go new file mode 100644 index 00000000000..5dfb7d559b1 --- /dev/null +++ b/vendor/github.com/packethost/packngo/spotmarket.go @@ -0,0 +1,39 @@ +package packngo + +const spotMarketBasePath = "/market/spot/prices" + +// SpotMarketService expooses Spot Market methods +type SpotMarketService interface { + Prices() (PriceMap, *Response, error) +} + +// SpotMarketServiceOp implements SpotMarketService +type SpotMarketServiceOp struct { + client *Client +} + +// PriceMap is a map of [facility][plan]-> float Price +type PriceMap map[string]map[string]float64 + +// Prices gets current PriceMap from the API +func (s *SpotMarketServiceOp) Prices() (PriceMap, *Response, error) { + root := new(struct { + SMPs map[string]map[string]struct { + Price float64 `json:"price"` + } `json:"spot_market_prices"` + }) + + resp, err := s.client.DoRequest("GET", spotMarketBasePath, nil, root) + if err != nil { + return nil, resp, err + } + + prices := make(PriceMap) + for facility, planMap := range root.SMPs { + prices[facility] = map[string]float64{} + for plan, v := range planMap { + prices[facility][plan] = v.Price + } + } + return prices, resp, err +} diff --git a/vendor/github.com/packethost/packngo/spotmarketrequest.go b/vendor/github.com/packethost/packngo/spotmarketrequest.go new file mode 100644 index 00000000000..3f5b2559480 --- /dev/null +++ b/vendor/github.com/packethost/packngo/spotmarketrequest.go @@ -0,0 +1,114 @@ +package packngo + +import ( + "fmt" + "math" +) + +const spotMarketRequestBasePath = "/spot-market-requests" + +type SpotMarketRequestService interface { + List(string, *ListOptions) ([]SpotMarketRequest, *Response, error) + Create(*SpotMarketRequestCreateRequest, string) (*SpotMarketRequest, *Response, error) + Delete(string, bool) (*Response, error) + Get(string, *GetOptions) (*SpotMarketRequest, *Response, error) +} + +type SpotMarketRequestCreateRequest struct { + DevicesMax int `json:"devices_max"` + DevicesMin int `json:"devices_min"` + EndAt *Timestamp `json:"end_at,omitempty"` + FacilityIDs []string `json:"facilities"` + MaxBidPrice float64 `json:"max_bid_price"` + + Parameters SpotMarketRequestInstanceParameters `json:"instance_parameters"` +} + +type SpotMarketRequest struct { + SpotMarketRequestCreateRequest + ID string `json:"id"` + Devices []Device `json:"devices"` + Facilities []Facility `json:"facilities"` + Project Project `json:"project"` + Href string `json:"href"` 
+ Plan Plan `json:"plan"` +} + +type SpotMarketRequestInstanceParameters struct { + AlwaysPXE bool `json:"always_pxe,omitempty"` + BillingCycle string `json:"billing_cycle"` + CustomData string `json:"customdata,omitempty"` + Description string `json:"description,omitempty"` + Features []string `json:"features,omitempty"` + Hostname string `json:"hostname,omitempty"` + Hostnames []string `json:"hostnames,omitempty"` + Locked bool `json:"locked,omitempty"` + OperatingSystem string `json:"operating_system"` + Plan string `json:"plan"` + ProjectSSHKeys []string `json:"project_ssh_keys,omitempty"` + Tags []string `json:"tags"` + TerminationTime *Timestamp `json:"termination_time,omitempty"` + UserSSHKeys []string `json:"user_ssh_keys,omitempty"` + UserData string `json:"userdata"` +} + +type SpotMarketRequestServiceOp struct { + client *Client +} + +func roundPlus(f float64, places int) float64 { + shift := math.Pow(10, float64(places)) + return math.Floor(f*shift+.5) / shift +} + +func (s *SpotMarketRequestServiceOp) Create(cr *SpotMarketRequestCreateRequest, pID string) (*SpotMarketRequest, *Response, error) { + path := fmt.Sprintf("%s/%s%s?include=devices,project,plan", projectBasePath, pID, spotMarketRequestBasePath) + cr.MaxBidPrice = roundPlus(cr.MaxBidPrice, 2) + smr := new(SpotMarketRequest) + + resp, err := s.client.DoRequest("POST", path, cr, smr) + if err != nil { + return nil, resp, err + } + + return smr, resp, err +} + +func (s *SpotMarketRequestServiceOp) List(pID string, listOpt *ListOptions) ([]SpotMarketRequest, *Response, error) { + type smrRoot struct { + SMRs []SpotMarketRequest `json:"spot_market_requests"` + } + + params := createListOptionsURL(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, pID, spotMarketRequestBasePath, params) + output := new(smrRoot) + + resp, err := s.client.DoRequest("GET", path, nil, output) + if err != nil { + return nil, nil, err + } + + return output.SMRs, resp, nil +} + +func (s *SpotMarketRequestServiceOp) Get(id string, getOpt *GetOptions) (*SpotMarketRequest, *Response, error) { + params := createGetOptionsURL(getOpt) + path := fmt.Sprintf("%s/%s?%s", spotMarketRequestBasePath, id, params) + smr := new(SpotMarketRequest) + + resp, err := s.client.DoRequest("GET", path, nil, &smr) + if err != nil { + return nil, resp, err + } + + return smr, resp, err +} + +func (s *SpotMarketRequestServiceOp) Delete(id string, forceDelete bool) (*Response, error) { + path := fmt.Sprintf("%s/%s", spotMarketRequestBasePath, id) + var params *map[string]bool + if forceDelete { + params = &map[string]bool{"force_termination": true} + } + return s.client.DoRequest("DELETE", path, params, nil) +} diff --git a/vendor/github.com/packethost/packngo/sshkeys.go b/vendor/github.com/packethost/packngo/sshkeys.go new file mode 100644 index 00000000000..4b198f571a4 --- /dev/null +++ b/vendor/github.com/packethost/packngo/sshkeys.go @@ -0,0 +1,139 @@ +package packngo + +import "fmt" + +const ( + sshKeyBasePath = "/ssh-keys" +) + +// SSHKeyService interface defines available device methods +type SSHKeyService interface { + List() ([]SSHKey, *Response, error) + ProjectList(string) ([]SSHKey, *Response, error) + Get(string, *GetOptions) (*SSHKey, *Response, error) + Create(*SSHKeyCreateRequest) (*SSHKey, *Response, error) + Update(string, *SSHKeyUpdateRequest) (*SSHKey, *Response, error) + Delete(string) (*Response, error) +} + +type sshKeyRoot struct { + SSHKeys []SSHKey `json:"ssh_keys"` +} + +// SSHKey represents a user's ssh key +type SSHKey struct { + ID 
string `json:"id"` + Label string `json:"label"` + Key string `json:"key"` + FingerPrint string `json:"fingerprint"` + Created string `json:"created_at"` + Updated string `json:"updated_at"` + User User `json:"user,omitempty"` + URL string `json:"href,omitempty"` +} + +func (s SSHKey) String() string { + return Stringify(s) +} + +// SSHKeyCreateRequest type used to create an ssh key +type SSHKeyCreateRequest struct { + Label string `json:"label"` + Key string `json:"key"` + ProjectID string `json:"-"` +} + +func (s SSHKeyCreateRequest) String() string { + return Stringify(s) +} + +// SSHKeyUpdateRequest type used to update an ssh key +type SSHKeyUpdateRequest struct { + Label *string `json:"label,omitempty"` + Key *string `json:"key,omitempty"` +} + +func (s SSHKeyUpdateRequest) String() string { + return Stringify(s) +} + +// SSHKeyServiceOp implements SSHKeyService +type SSHKeyServiceOp struct { + client *Client +} + +func (s *SSHKeyServiceOp) list(url string) ([]SSHKey, *Response, error) { + root := new(sshKeyRoot) + + resp, err := s.client.DoRequest("GET", url, nil, root) + if err != nil { + return nil, resp, err + } + + return root.SSHKeys, resp, err +} + +// ProjectList lists ssh keys of a project +func (s *SSHKeyServiceOp) ProjectList(projectID string) ([]SSHKey, *Response, error) { + return s.list(fmt.Sprintf("%s/%s%s", projectBasePath, projectID, sshKeyBasePath)) + +} + +// List returns a user's ssh keys +func (s *SSHKeyServiceOp) List() ([]SSHKey, *Response, error) { + return s.list(sshKeyBasePath) +} + +// Get returns an ssh key by id +func (s *SSHKeyServiceOp) Get(sshKeyID string, getOpt *GetOptions) (*SSHKey, *Response, error) { + params := createGetOptionsURL(getOpt) + path := fmt.Sprintf("%s/%s?%s", sshKeyBasePath, sshKeyID, params) + sshKey := new(SSHKey) + + resp, err := s.client.DoRequest("GET", path, nil, sshKey) + if err != nil { + return nil, resp, err + } + + return sshKey, resp, err +} + +// Create creates a new ssh key +func (s *SSHKeyServiceOp) Create(createRequest *SSHKeyCreateRequest) (*SSHKey, *Response, error) { + path := sshKeyBasePath + if createRequest.ProjectID != "" { + path = fmt.Sprintf("%s/%s%s", projectBasePath, createRequest.ProjectID, sshKeyBasePath) + } + sshKey := new(SSHKey) + + resp, err := s.client.DoRequest("POST", path, createRequest, sshKey) + if err != nil { + return nil, resp, err + } + + return sshKey, resp, err +} + +// Update updates an ssh key +func (s *SSHKeyServiceOp) Update(id string, updateRequest *SSHKeyUpdateRequest) (*SSHKey, *Response, error) { + if updateRequest.Label == nil && updateRequest.Key == nil { + return nil, nil, fmt.Errorf("You must set either Label or Key string for SSH Key update") + } + path := fmt.Sprintf("%s/%s", sshKeyBasePath, id) + + sshKey := new(SSHKey) + + resp, err := s.client.DoRequest("PATCH", path, updateRequest, sshKey) + if err != nil { + return nil, resp, err + } + + return sshKey, resp, err +} + +// Delete deletes an ssh key +func (s *SSHKeyServiceOp) Delete(sshKeyID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", sshKeyBasePath, sshKeyID) + + return s.client.DoRequest("DELETE", path, nil, nil) +} diff --git a/vendor/github.com/packethost/packngo/timestamp.go b/vendor/github.com/packethost/packngo/timestamp.go new file mode 100644 index 00000000000..c3320ed62eb --- /dev/null +++ b/vendor/github.com/packethost/packngo/timestamp.go @@ -0,0 +1,35 @@ +package packngo + +import ( + "strconv" + "time" +) + +// Timestamp represents a time that can be unmarshalled from a JSON string +// 
formatted as either an RFC3339 or Unix timestamp. All +// exported methods of time.Time can be called on Timestamp. +type Timestamp struct { + time.Time +} + +func (t Timestamp) String() string { + return t.Time.String() +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// Time is expected in RFC3339 or Unix format. +func (t *Timestamp) UnmarshalJSON(data []byte) (err error) { + str := string(data) + i, err := strconv.ParseInt(str, 10, 64) + if err == nil { + t.Time = time.Unix(i, 0) + } else { + t.Time, err = time.Parse(`"`+time.RFC3339+`"`, str) + } + return +} + +// Equal reports whether t and u are equal based on time.Equal +func (t Timestamp) Equal(u Timestamp) bool { + return t.Time.Equal(u.Time) +} diff --git a/vendor/github.com/packethost/packngo/two_factor_auth.go b/vendor/github.com/packethost/packngo/two_factor_auth.go new file mode 100644 index 00000000000..5064b09fe3c --- /dev/null +++ b/vendor/github.com/packethost/packngo/two_factor_auth.go @@ -0,0 +1,56 @@ +package packngo + +const twoFactorAuthAppPath = "/user/otp/app" +const twoFactorAuthSmsPath = "/user/otp/sms" + +// TwoFactorAuthService interface defines available two factor authentication functions +type TwoFactorAuthService interface { + EnableApp(string) (*Response, error) + DisableApp(string) (*Response, error) + EnableSms(string) (*Response, error) + DisableSms(string) (*Response, error) + ReceiveSms() (*Response, error) + SeedApp() (string, *Response, error) +} + +// TwoFactorAuthServiceOp implements TwoFactorAuthService +type TwoFactorAuthServiceOp struct { + client *Client +} + +// EnableApp function enables two factor auth using authenticatior app +func (s *TwoFactorAuthServiceOp) EnableApp(token string) (resp *Response, err error) { + headers := map[string]string{"x-otp-token": token} + return s.client.DoRequestWithHeader("POST", headers, twoFactorAuthAppPath, nil, nil) +} + +// EnableSms function enables two factor auth using sms +func (s *TwoFactorAuthServiceOp) EnableSms(token string) (resp *Response, err error) { + headers := map[string]string{"x-otp-token": token} + return s.client.DoRequestWithHeader("POST", headers, twoFactorAuthSmsPath, nil, nil) +} + +// ReceiveSms orders the auth service to issue an SMS token +func (s *TwoFactorAuthServiceOp) ReceiveSms() (resp *Response, err error) { + return s.client.DoRequest("POST", twoFactorAuthSmsPath+"/receive", nil, nil) +} + +// DisableApp function disables two factor auth using +func (s *TwoFactorAuthServiceOp) DisableApp(token string) (resp *Response, err error) { + headers := map[string]string{"x-otp-token": token} + return s.client.DoRequestWithHeader("DELETE", headers, twoFactorAuthAppPath, nil, nil) +} + +// DisableSms function disables two factor auth using +func (s *TwoFactorAuthServiceOp) DisableSms(token string) (resp *Response, err error) { + headers := map[string]string{"x-otp-token": token} + return s.client.DoRequestWithHeader("DELETE", headers, twoFactorAuthSmsPath, nil, nil) +} + +// SeedApp orders the auth service to issue a token via google authenticator +func (s *TwoFactorAuthServiceOp) SeedApp() (otpURI string, resp *Response, err error) { + ret := &map[string]string{} + resp, err = s.client.DoRequest("POST", twoFactorAuthAppPath+"/receive", nil, ret) + + return (*ret)["otp_uri"], resp, err +} diff --git a/vendor/github.com/packethost/packngo/user.go b/vendor/github.com/packethost/packngo/user.go new file mode 100644 index 00000000000..ef4b25bb61c --- /dev/null +++ b/vendor/github.com/packethost/packngo/user.go @@ -0,0 
+1,100 @@ +package packngo + +import "fmt" + +const usersBasePath = "/users" +const userBasePath = "/user" + +// UserService interface defines available user methods +type UserService interface { + List(*ListOptions) ([]User, *Response, error) + Get(string, *GetOptions) (*User, *Response, error) + Current() (*User, *Response, error) +} + +type usersRoot struct { + Users []User `json:"users"` + Meta meta `json:"meta"` +} + +// User represents a Packet user +type User struct { + ID string `json:"id"` + FirstName string `json:"first_name,omitempty"` + LastName string `json:"last_name,omitempty"` + FullName string `json:"full_name,omitempty"` + Email string `json:"email,omitempty"` + TwoFactor string `json:"two_factor_auth,omitempty"` + DefaultOrganizationID string `json:"default_organization_id,omitempty"` + AvatarURL string `json:"avatar_url,omitempty"` + Facebook string `json:"twitter,omitempty"` + Twitter string `json:"facebook,omitempty"` + LinkedIn string `json:"linkedin,omitempty"` + Created string `json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + TimeZone string `json:"timezone,omitempty"` + Emails []Email `json:"emails,omitempty"` + PhoneNumber string `json:"phone_number,omitempty"` + URL string `json:"href,omitempty"` + VPN bool `json:"vpn"` +} + +func (u User) String() string { + return Stringify(u) +} + +// UserServiceOp implements UserService +type UserServiceOp struct { + client *Client +} + +// Get method gets a user by userID +func (s *UserServiceOp) List(listOpt *ListOptions) (users []User, resp *Response, err error) { + params := createListOptionsURL(listOpt) + path := fmt.Sprintf("%s?%s", usersBasePath, params) + + for { + subset := new(usersRoot) + + resp, err = s.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + users = append(users, subset.Users...) + + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + return + } +} + +// Returns the user object for the currently logged-in user. 
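Current takes no arguments and is a quick way to confirm the token works. Given an authenticated *packngo.Client named c, built with NewClient as in the earlier sketch (with fmt and log imported), a caller might do:

    u, _, err := c.Users.Current()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("authenticated as %s (%s)\n", u.FullName, u.Email)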
+func (s *UserServiceOp) Current() (*User, *Response, error) { + user := new(User) + + resp, err := s.client.DoRequest("GET", userBasePath, nil, user) + if err != nil { + return nil, resp, err + } + + return user, resp, err +} + +func (s *UserServiceOp) Get(userID string, getOpt *GetOptions) (*User, *Response, error) { + params := createGetOptionsURL(getOpt) + path := fmt.Sprintf("%s/%s?%s", usersBasePath, userID, params) + user := new(User) + + resp, err := s.client.DoRequest("GET", path, nil, user) + if err != nil { + return nil, resp, err + } + + return user, resp, err +} diff --git a/vendor/github.com/packethost/packngo/utils.go b/vendor/github.com/packethost/packngo/utils.go new file mode 100644 index 00000000000..db67f1f40e0 --- /dev/null +++ b/vendor/github.com/packethost/packngo/utils.go @@ -0,0 +1,115 @@ +package packngo + +import ( + "bytes" + "fmt" + "io" + "reflect" +) + +var ( + timestampType = reflect.TypeOf(Timestamp{}) + Facilities = []string{ + "yyz1", "nrt1", "atl1", "mrs1", "hkg1", "ams1", + "ewr1", "sin1", "dfw1", "lax1", "syd1", "sjc1", + "ord1", "iad1", "fra1", "sea1", "dfw2"} + FacilityFeatures = []string{ + "baremetal", "layer_2", "backend_transfer", "storage", "global_ipv4"} + UtilizationLevels = []string{"unavailable", "critical", "limited", "normal"} + DevicePlans = []string{"c2.medium.x86", "g2.large.x86", + "m2.xlarge.x86", "x2.xlarge.x86", "baremetal_2a", "baremetal_2a2", + "baremetal_1", "baremetal_3", "baremetal_2", "baremetal_s", + "baremetal_0", "baremetal_1e", + } +) + +// Stringify creates a string representation of the provided message +func Stringify(message interface{}) string { + var buf bytes.Buffer + v := reflect.ValueOf(message) + stringifyValue(&buf, v) + return buf.String() +} + +// StreamToString converts a reader to a string +func StreamToString(stream io.Reader) string { + buf := new(bytes.Buffer) + buf.ReadFrom(stream) + return buf.String() +} + +// contains tells whether a contains x. 
+func contains(a []string, x string) bool { + for _, n := range a { + if x == n { + return true + } + } + return false +} + +// stringifyValue was graciously cargoculted from the goprotubuf library +func stringifyValue(w io.Writer, val reflect.Value) { + if val.Kind() == reflect.Ptr && val.IsNil() { + w.Write([]byte("")) + return + } + + v := reflect.Indirect(val) + + switch v.Kind() { + case reflect.String: + fmt.Fprintf(w, `"%s"`, v) + case reflect.Slice: + w.Write([]byte{'['}) + for i := 0; i < v.Len(); i++ { + if i > 0 { + w.Write([]byte{' '}) + } + + stringifyValue(w, v.Index(i)) + } + + w.Write([]byte{']'}) + return + case reflect.Struct: + if v.Type().Name() != "" { + w.Write([]byte(v.Type().String())) + } + + // special handling of Timestamp values + if v.Type() == timestampType { + fmt.Fprintf(w, "{%s}", v.Interface()) + return + } + + w.Write([]byte{'{'}) + + var sep bool + for i := 0; i < v.NumField(); i++ { + fv := v.Field(i) + if fv.Kind() == reflect.Ptr && fv.IsNil() { + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + continue + } + + if sep { + w.Write([]byte(", ")) + } else { + sep = true + } + + w.Write([]byte(v.Type().Field(i).Name)) + w.Write([]byte{':'}) + stringifyValue(w, fv) + } + + w.Write([]byte{'}'}) + default: + if v.CanInterface() { + fmt.Fprint(w, v.Interface()) + } + } +} diff --git a/vendor/github.com/packethost/packngo/virtualnetworks.go b/vendor/github.com/packethost/packngo/virtualnetworks.go new file mode 100644 index 00000000000..5f0f9d0f080 --- /dev/null +++ b/vendor/github.com/packethost/packngo/virtualnetworks.go @@ -0,0 +1,92 @@ +package packngo + +import ( + "fmt" +) + +const virtualNetworkBasePath = "/virtual-networks" + +// DevicePortService handles operations on a port which belongs to a particular device +type ProjectVirtualNetworkService interface { + List(projectID string, listOpt *ListOptions) (*VirtualNetworkListResponse, *Response, error) + Create(*VirtualNetworkCreateRequest) (*VirtualNetwork, *Response, error) + Get(string, *GetOptions) (*VirtualNetwork, *Response, error) + Delete(virtualNetworkID string) (*Response, error) +} + +type VirtualNetwork struct { + ID string `json:"id"` + Description string `json:"description,omitempty"` + VXLAN int `json:"vxlan,omitempty"` + FacilityCode string `json:"facility_code,omitempty"` + CreatedAt string `json:"created_at,omitempty"` + Href string `json:"href"` + Project Project `json:"assigned_to"` +} + +type ProjectVirtualNetworkServiceOp struct { + client *Client +} + +type VirtualNetworkListResponse struct { + VirtualNetworks []VirtualNetwork `json:"virtual_networks"` +} + +func (i *ProjectVirtualNetworkServiceOp) List(projectID string, listOpt *ListOptions) (*VirtualNetworkListResponse, *Response, error) { + + params := createListOptionsURL(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, virtualNetworkBasePath, params) + output := new(VirtualNetworkListResponse) + + resp, err := i.client.DoRequest("GET", path, nil, output) + if err != nil { + return nil, nil, err + } + + return output, resp, nil +} + +type VirtualNetworkCreateRequest struct { + ProjectID string `json:"project_id"` + Description string `json:"description"` + Facility string `json:"facility"` +} + +func (i *ProjectVirtualNetworkServiceOp) Get(vlanID string, getOpt *GetOptions) (*VirtualNetwork, *Response, error) { + params := createGetOptionsURL(getOpt) + path := fmt.Sprintf("%s/%s?%s", virtualNetworkBasePath, vlanID, params) + vlan := new(VirtualNetwork) + + resp, err := 
i.client.DoRequest("GET", path, nil, vlan) + if err != nil { + return nil, resp, err + } + + return vlan, resp, err +} + +func (i *ProjectVirtualNetworkServiceOp) Create(input *VirtualNetworkCreateRequest) (*VirtualNetwork, *Response, error) { + // TODO: May need to add timestamp to output from 'post' request + // for the 'created_at' attribute of VirtualNetwork struct since + // API response doesn't include it + path := fmt.Sprintf("%s/%s%s", projectBasePath, input.ProjectID, virtualNetworkBasePath) + output := new(VirtualNetwork) + + resp, err := i.client.DoRequest("POST", path, input, output) + if err != nil { + return nil, nil, err + } + + return output, resp, nil +} + +func (i *ProjectVirtualNetworkServiceOp) Delete(virtualNetworkID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", virtualNetworkBasePath, virtualNetworkID) + + resp, err := i.client.DoRequest("DELETE", path, nil, nil) + if err != nil { + return nil, err + } + + return resp, nil +} diff --git a/vendor/github.com/packethost/packngo/volumes.go b/vendor/github.com/packethost/packngo/volumes.go new file mode 100644 index 00000000000..ebaaddf1c23 --- /dev/null +++ b/vendor/github.com/packethost/packngo/volumes.go @@ -0,0 +1,238 @@ +package packngo + +import ( + "fmt" +) + +const ( + volumeBasePath = "/storage" + attachmentsBasePath = "/attachments" +) + +// VolumeService interface defines available Volume methods +type VolumeService interface { + List(string, *ListOptions) ([]Volume, *Response, error) + Get(string, *GetOptions) (*Volume, *Response, error) + Update(string, *VolumeUpdateRequest) (*Volume, *Response, error) + Delete(string) (*Response, error) + Create(*VolumeCreateRequest, string) (*Volume, *Response, error) + Lock(string) (*Response, error) + Unlock(string) (*Response, error) +} + +// VolumeAttachmentService defines attachment methdods +type VolumeAttachmentService interface { + Get(string, *GetOptions) (*VolumeAttachment, *Response, error) + Create(string, string) (*VolumeAttachment, *Response, error) + Delete(string) (*Response, error) +} + +type volumesRoot struct { + Volumes []Volume `json:"volumes"` + Meta meta `json:"meta"` +} + +// Volume represents a volume +type Volume struct { + Attachments []*VolumeAttachment `json:"attachments,omitempty"` + BillingCycle string `json:"billing_cycle,omitempty"` + Created string `json:"created_at,omitempty"` + Description string `json:"description,omitempty"` + Facility *Facility `json:"facility,omitempty"` + Href string `json:"href,omitempty"` + ID string `json:"id"` + Locked bool `json:"locked,omitempty"` + Name string `json:"name,omitempty"` + Plan *Plan `json:"plan,omitempty"` + Project *Project `json:"project,omitempty"` + Size int `json:"size,omitempty"` + SnapshotPolicies []*SnapshotPolicy `json:"snapshot_policies,omitempty"` + State string `json:"state,omitempty"` + Updated string `json:"updated_at,omitempty"` +} + +// SnapshotPolicy used to execute actions on volume +type SnapshotPolicy struct { + ID string `json:"id"` + Href string `json:"href"` + SnapshotFrequency string `json:"snapshot_frequency,omitempty"` + SnapshotCount int `json:"snapshot_count,omitempty"` +} + +func (v Volume) String() string { + return Stringify(v) +} + +// VolumeCreateRequest type used to create a Packet volume +type VolumeCreateRequest struct { + BillingCycle string `json:"billing_cycle"` + Description string `json:"description,omitempty"` + Locked bool `json:"locked,omitempty"` + Size int `json:"size"` + PlanID string `json:"plan_id"` + FacilityID string 
`json:"facility_id"` + SnapshotPolicies []*SnapshotPolicy `json:"snapshot_policies,omitempty"` +} + +func (v VolumeCreateRequest) String() string { + return Stringify(v) +} + +// VolumeUpdateRequest type used to update a Packet volume +type VolumeUpdateRequest struct { + Description *string `json:"description,omitempty"` + PlanID *string `json:"plan_id,omitempty"` + Size *int `json:"size,omitempty"` + BillingCycle *string `json:"billing_cycle,omitempty"` +} + +// VolumeAttachment is a type from Packet API +type VolumeAttachment struct { + Href string `json:"href"` + ID string `json:"id"` + Volume Volume `json:"volume"` + Device Device `json:"device"` +} + +func (v VolumeUpdateRequest) String() string { + return Stringify(v) +} + +// VolumeAttachmentServiceOp implements VolumeService +type VolumeAttachmentServiceOp struct { + client *Client +} + +// VolumeServiceOp implements VolumeService +type VolumeServiceOp struct { + client *Client +} + +// List returns the volumes for a project +func (v *VolumeServiceOp) List(projectID string, listOpt *ListOptions) (volumes []Volume, resp *Response, err error) { + params := createListOptionsURL(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, volumeBasePath, params) + + for { + subset := new(volumesRoot) + + resp, err = v.client.DoRequest("GET", path, nil, subset) + if err != nil { + return nil, resp, err + } + + volumes = append(volumes, subset.Volumes...) + + if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { + path = subset.Meta.Next.Href + if params != "" { + path = fmt.Sprintf("%s&%s", path, params) + } + continue + } + + return + } +} + +// Get returns a volume by id +func (v *VolumeServiceOp) Get(volumeID string, getOpt *GetOptions) (*Volume, *Response, error) { + params := createGetOptionsURL(getOpt) + path := fmt.Sprintf("%s/%s?%s", volumeBasePath, volumeID, params) + volume := new(Volume) + + resp, err := v.client.DoRequest("GET", path, nil, volume) + if err != nil { + return nil, resp, err + } + + return volume, resp, err +} + +// Update updates a volume +func (v *VolumeServiceOp) Update(id string, updateRequest *VolumeUpdateRequest) (*Volume, *Response, error) { + path := fmt.Sprintf("%s/%s", volumeBasePath, id) + volume := new(Volume) + + resp, err := v.client.DoRequest("PATCH", path, updateRequest, volume) + if err != nil { + return nil, resp, err + } + + return volume, resp, err +} + +// Delete deletes a volume +func (v *VolumeServiceOp) Delete(volumeID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", volumeBasePath, volumeID) + + return v.client.DoRequest("DELETE", path, nil, nil) +} + +// Create creates a new volume for a project +func (v *VolumeServiceOp) Create(createRequest *VolumeCreateRequest, projectID string) (*Volume, *Response, error) { + url := fmt.Sprintf("%s/%s%s", projectBasePath, projectID, volumeBasePath) + volume := new(Volume) + + resp, err := v.client.DoRequest("POST", url, createRequest, volume) + if err != nil { + return nil, resp, err + } + + return volume, resp, err +} + +// Attachments + +// Create Attachment, i.e. 
attach volume to a device +func (v *VolumeAttachmentServiceOp) Create(volumeID, deviceID string) (*VolumeAttachment, *Response, error) { + url := fmt.Sprintf("%s/%s%s", volumeBasePath, volumeID, attachmentsBasePath) + volAttachParam := map[string]string{ + "device_id": deviceID, + } + volumeAttachment := new(VolumeAttachment) + + resp, err := v.client.DoRequest("POST", url, volAttachParam, volumeAttachment) + if err != nil { + return nil, resp, err + } + return volumeAttachment, resp, nil +} + +// Get gets attachment by id +func (v *VolumeAttachmentServiceOp) Get(attachmentID string, getOpt *GetOptions) (*VolumeAttachment, *Response, error) { + params := createGetOptionsURL(getOpt) + + path := fmt.Sprintf("%s%s/%s?%s", volumeBasePath, attachmentsBasePath, attachmentID, params) + volumeAttachment := new(VolumeAttachment) + + resp, err := v.client.DoRequest("GET", path, nil, volumeAttachment) + if err != nil { + return nil, resp, err + } + + return volumeAttachment, resp, nil +} + +// Delete deletes attachment by id +func (v *VolumeAttachmentServiceOp) Delete(attachmentID string) (*Response, error) { + path := fmt.Sprintf("%s%s/%s", volumeBasePath, attachmentsBasePath, attachmentID) + + return v.client.DoRequest("DELETE", path, nil, nil) +} + +// Lock sets a volume to "locked" +func (s *VolumeServiceOp) Lock(id string) (*Response, error) { + path := fmt.Sprintf("%s/%s", volumeBasePath, id) + action := lockType{Locked: true} + + return s.client.DoRequest("PATCH", path, action, nil) +} + +// Unlock sets a volume to "unlocked" +func (s *VolumeServiceOp) Unlock(id string) (*Response, error) { + path := fmt.Sprintf("%s/%s", volumeBasePath, id) + action := lockType{Locked: false} + + return s.client.DoRequest("PATCH", path, action, nil) +} diff --git a/vendor/github.com/packethost/packngo/vpn.go b/vendor/github.com/packethost/packngo/vpn.go new file mode 100644 index 00000000000..f228f7d43e4 --- /dev/null +++ b/vendor/github.com/packethost/packngo/vpn.go @@ -0,0 +1,50 @@ +package packngo + +import "fmt" + +const vpnBasePath = "/user/vpn" + +// VPNConfig struct +type VPNConfig struct { + Config string `json:"config,omitempty"` +} + +// VPNService interface defines available VPN functions +type VPNService interface { + Enable() (*Response, error) + Disable() (*Response, error) + Get(code string, getOpt *GetOptions) (*VPNConfig, *Response, error) +} + +// VPNServiceOp implements VPNService +type VPNServiceOp struct { + client *Client +} + +// Enable VPN for current user +func (s *VPNServiceOp) Enable() (resp *Response, err error) { + return s.client.DoRequest("POST", vpnBasePath, nil, nil) +} + +// Disable VPN for current user +func (s *VPNServiceOp) Disable() (resp *Response, err error) { + return s.client.DoRequest("DELETE", vpnBasePath, nil, nil) + +} + +// Get returns the client vpn config for the currently logged-in user. 
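Given the same client, the per-facility client configuration can be fetched once VPN is enabled for the user; the facility code below is only an example:

    if _, err := c.VPN.Enable(); err != nil {
        log.Fatal(err)
    }
    cfg, _, err := c.VPN.Get("ewr1", nil) // "ewr1" is an example facility code
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(cfg.Config)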
+func (s *VPNServiceOp) Get(code string, getOpt *GetOptions) (config *VPNConfig, resp *Response, err error) { + params := createGetOptionsURL(getOpt) + config = &VPNConfig{} + path := fmt.Sprintf("%s?code=%s", vpnBasePath, code) + if params != "" { + path += params + } + + resp, err = s.client.DoRequest("GET", path, nil, config) + if err != nil { + return nil, resp, err + } + + return config, resp, err +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/LICENSE b/vendor/github.com/terraform-providers/terraform-provider-packet/LICENSE new file mode 100644 index 00000000000..a612ad9813b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. 
For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/config.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/config.go new file mode 100644 index 00000000000..3adc559ace3 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/config.go @@ -0,0 +1,22 @@ +package packet + +import ( + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform/helper/logging" + "github.com/packethost/packngo" +) + +const ( + consumerToken = "aZ9GmqHTPtxevvFq9SK3Pi2yr9YCbRzduCSXF2SNem5sjB91mDq7Th3ZwTtRqMWZ" +) + +type Config struct { + AuthToken string +} + +// Client returns a new client for accessing Packet's API. +func (c *Config) Client() *packngo.Client { + client := cleanhttp.DefaultClient() + client.Transport = logging.NewTransport("Packet", client.Transport) + return packngo.NewClientWithAuth(consumerToken, c.AuthToken, client) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_operating_system.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_operating_system.go new file mode 100644 index 00000000000..8221da1e819 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_operating_system.go @@ -0,0 +1,130 @@ +package packet + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func dataSourceOperatingSystem() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketOperatingSystemRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + }, + "distro": { + Type: schema.TypeString, + Optional: true, + }, + "version": { + Type: schema.TypeString, + Optional: true, + }, + "provisionable_on": { + Type: schema.TypeString, + Optional: true, + }, + "slug": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourcePacketOperatingSystemRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + name, nameOK := d.GetOk("name") + distro, distroOK := d.GetOk("distro") + version, versionOK := d.GetOk("version") + provisionableOn, provisionableOnOK := d.GetOk("provisionable_on") + + if !nameOK && !distroOK && !versionOK && !provisionableOnOK { + return fmt.Errorf("One of name, distro, version, or provisionable_on must be assigned") + } + + log.Println("[DEBUG] ******") + log.Println("[DEBUG] params", name, distro, version, provisionableOn) + log.Println("[DEBUG] ******") + + oss, _, err := client.OperatingSystems.List() + if err != nil { + return err + } + + final := []packngo.OS{} + temp := []packngo.OS{} + + if nameOK { + for _, os := range oss { + if strings.Contains(strings.ToLower(os.Name), strings.ToLower(name.(string))) { + temp = append(temp, os) + } + final = temp + } + } + + if distroOK { + temp = []packngo.OS{} + if len(temp) == 0 { + final = oss + } + for _, v := range final { + if v.Distro == distro.(string) { + temp = append(temp, v) + } + } + final = temp + } + + if versionOK { + temp = []packngo.OS{} + if len(final) == 0 { + final = oss + } + for _, v := range final { + if v.Version == version.(string) { + temp = append(temp, v) + } + } + final = temp + } + + if provisionableOnOK { + temp = []packngo.OS{} + if len(final) == 0 { + final = oss + } + for _, v := range final { + for _, po := range v.ProvisionableOn { + if po == provisionableOn.(string) { + temp = 
append(temp, v) + } + } + } + final = temp + } + log.Println("[DEBUG] RESULTS:", final) + + if len(final) == 0 { + return fmt.Errorf("There are no operating systems that match the search criteria") + } + + if len(final) > 1 { + return fmt.Errorf("There is more than one operating system that matches the search criteria") + } + for _, v := range final { + d.Set("name", v.Name) + d.Set("distro", v.Distro) + d.Set("version", v.Version) + d.Set("slug", v.Slug) + d.SetId(v.Slug) + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_precreated_ip_block.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_precreated_ip_block.go new file mode 100644 index 00000000000..fd7ec49109d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_precreated_ip_block.go @@ -0,0 +1,96 @@ +package packet + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func dataSourcePacketPreCreatedIPBlock() *schema.Resource { + s := packetIPComputedFields() + s["project_id"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + } + s["global"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + } + s["public"] = &schema.Schema{ + Type: schema.TypeBool, + Required: true, + } + + s["facility"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + } + s["address_family"] = &schema.Schema{ + Type: schema.TypeInt, + Required: true, + } + s["cidr_notation"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + s["quantity"] = &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + } + + return &schema.Resource{ + Read: dataSourcePacketReservedIPBlockRead, + Schema: s, + } +} + +func dataSourcePacketReservedIPBlockRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + projectID := d.Get("project_id").(string) + log.Println("[DEBUG] packet_precreated_ip_block - getting list of IPs in a project") + ips, _, err := client.ProjectIPs.List(projectID) + if err != nil { + return err + } + ipv := d.Get("address_family").(int) + public := d.Get("public").(bool) + global := d.Get("global").(bool) + + if !public && global { + return fmt.Errorf("Private (non-public) global IP address blocks are not supported in Packet") + } + + fval, fok := d.GetOk("facility") + if fok && global { + return fmt.Errorf("You can't specify facility for global IP block - addresses from global blocks can be assigned to devices across several facilities") + } + + if fok { + // lookup of not-global block + facility := fval.(string) + for _, ip := range ips { + if ip.Public == public && ip.AddressFamily == ipv && facility == ip.Facility.Code { + loadBlock(d, &ip) + break + } + } + } else { + // lookup of global block + for _, ip := range ips { + blockGlobal := getGlobalBool(&ip) + if ip.Public == public && ip.AddressFamily == ipv && blockGlobal { + loadBlock(d, &ip) + break + } + } + + } + if d.Get("cidr_notation") == "" { + return fmt.Errorf("Could not find matching reserved block, all IPs were %v", ips) + } + return nil + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_spot_market_price.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_spot_market_price.go new file mode 100644 index 00000000000..741a0106059 --- /dev/null +++ 
b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_spot_market_price.go @@ -0,0 +1,54 @@ +package packet + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func dataSourceSpotMarketPrice() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketSpotMarketPriceRead, + Schema: map[string]*schema.Schema{ + "facility": { + Type: schema.TypeString, + Required: true, + }, + "plan": { + Type: schema.TypeString, + Required: true, + }, + "price": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + } +} + +func dataSourcePacketSpotMarketPriceRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + facility := d.Get("facility").(string) + plan := d.Get("plan").(string) + + prices, _, err := client.SpotMarket.Prices() + if err != nil { + return err + } + + var price float64 + if fac, ok := prices[facility]; ok { + if pri, ok := fac[plan]; ok { + price = pri + } else { + return fmt.Errorf("Facility %s does not have prices for plan %s", facility, plan) + } + } else { + return fmt.Errorf("There is no facility %s", facility) + } + d.Set("price", price) + d.SetId(facility) + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/errors.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/errors.go new file mode 100644 index 00000000000..9b945347ac7 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/errors.go @@ -0,0 +1,49 @@ +package packet + +import ( + "net/http" + "strings" + + "github.com/packethost/packngo" +) + +func friendlyError(err error) error { + if e, ok := err.(*packngo.ErrorResponse); ok { + errors := Errors(e.Errors) + // if packngo gives us blank error strings, populate them with something useful + // this is useful so the user gets some sort of indication of a failure rather than a blank message + if 0 == len(errors) { + errors = Errors{e.SingleError} + } + return &ErrorResponse{ + StatusCode: e.Response.StatusCode, + Errors: errors, + } + } + return err +} + +func isForbidden(err error) bool { + if r, ok := err.(*ErrorResponse); ok { + return r.StatusCode == http.StatusForbidden + } + return false +} + +func isNotFound(err error) bool { + if r, ok := err.(*ErrorResponse); ok { + return r.StatusCode == http.StatusNotFound + } + return false +} + +type Errors []string + +func (e Errors) Error() string { + return strings.Join(e, "; ") +} + +type ErrorResponse struct { + StatusCode int + Errors +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/provider.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/provider.go new file mode 100644 index 00000000000..e88cd5a70f4 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/provider.go @@ -0,0 +1,60 @@ +package packet + +import ( + "time" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// Provider returns a schema.Provider for managing Packet infrastructure. 
+func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "auth_token": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("PACKET_AUTH_TOKEN", nil), + Description: "The API auth key for API operations.", + }, + }, + DataSourcesMap: map[string]*schema.Resource{ + "packet_precreated_ip_block": dataSourcePacketPreCreatedIPBlock(), + "packet_operating_system": dataSourceOperatingSystem(), + "packet_spot_market_price": dataSourceSpotMarketPrice(), + }, + + ResourcesMap: map[string]*schema.Resource{ + "packet_device": resourcePacketDevice(), + "packet_ssh_key": resourcePacketSSHKey(), + "packet_project_ssh_key": resourcePacketProjectSSHKey(), + "packet_project": resourcePacketProject(), + "packet_organization": resourcePacketOrganization(), + "packet_volume": resourcePacketVolume(), + "packet_volume_attachment": resourcePacketVolumeAttachment(), + "packet_reserved_ip_block": resourcePacketReservedIPBlock(), + "packet_ip_attachment": resourcePacketIPAttachment(), + "packet_spot_market_request": resourcePacketSpotMarketRequest(), + "packet_vlan": resourcePacketVlan(), + "packet_bgp_session": resourcePacketBGPSession(), + "packet_port_vlan_attachment": resourcePacketPortVlanAttachment(), + "packet_connect": resourcePacketConnect(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + AuthToken: d.Get("auth_token").(string), + } + return config.Client(), nil +} + +var resourceDefaultTimeouts = &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + Default: schema.DefaultTimeout(60 * time.Minute), +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_bgp_session.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_bgp_session.go new file mode 100644 index 00000000000..d548e3986b2 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_bgp_session.go @@ -0,0 +1,98 @@ +package packet + +import ( + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "github.com/packethost/packngo" +) + +func resourcePacketBGPSession() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketBGPSessionCreate, + Read: resourcePacketBGPSessionRead, + Delete: resourcePacketBGPSessionDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "device_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "address_family": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"ipv4", "ipv6"}, false), + }, + "default_route": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourcePacketBGPSessionCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + dID := d.Get("device_id").(string) + addressFamily := d.Get("address_family").(string) + defaultRoute := d.Get("default_route").(bool) + log.Printf("[DEBUG] creating %s BGP session to device (%s)\n", addressFamily, dID) + bgpSession, _, err := 
client.BGPSessions.Create( + dID, packngo.CreateBGPSessionRequest{ + AddressFamily: "ipv4", + DefaultRoute: &defaultRoute}) + if err != nil { + return friendlyError(err) + } + + d.SetId(bgpSession.ID) + return resourcePacketBGPSessionRead(d, meta) +} + +func resourcePacketBGPSessionRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + bgpSession, _, err := client.BGPSessions.Get(d.Id(), + &packngo.GetOptions{Includes: []string{"device"}}) + if err != nil { + err = friendlyError(err) + if isNotFound(err) { + d.SetId("") + return nil + } + return err + } + defaultRoute := false + if bgpSession.DefaultRoute != nil { + if *(bgpSession.DefaultRoute) { + defaultRoute = true + } + } + d.Set("device_id", bgpSession.Device.ID) + d.Set("address_family", bgpSession.AddressFamily) + d.Set("status", bgpSession.Status) + d.Set("default_route", defaultRoute) + d.SetId(bgpSession.ID) + return nil +} + +func resourcePacketBGPSessionDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + _, err := client.BGPSessions.Delete(d.Id()) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_connect.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_connect.go new file mode 100644 index 00000000000..ccb0b7cc08c --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_connect.go @@ -0,0 +1,157 @@ +package packet + +import ( + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketConnect() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketConnectCreate, + Read: resourcePacketConnectRead, + Delete: resourcePacketConnectDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "provider_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "facility": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "port_speed": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "provider_payload": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Sensitive: true, + }, + "vxlan": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func waitForConnectStatus(d *schema.ResourceData, target string, pending string, meta interface{}) (interface{}, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{pending}, + Target: []string{target}, + Refresh: connectRefreshFunc(d, meta), + Timeout: 60 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + return stateConf.WaitForState() +} + +func connectRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + client := meta.(*packngo.Client) + + return func() (interface{}, string, error) { + if err := resourcePacketConnectRead(d, meta); err != nil { + return nil, "", err + } + + if status, ok := d.GetOk("status"); ok { + projectId := d.Get("project_id").(string) + c, _, err := client.Connects.Get(d.Id(), projectId, nil) + if err != nil { 
+ return nil, "", friendlyError(err) + } + return c, status.(string), nil + } + + return nil, "", nil + } +} + +func resourcePacketConnectCreate(d *schema.ResourceData, meta interface{}) error { + c := meta.(*packngo.Client) + createRequest := &packngo.ConnectCreateRequest{ + ProjectID: d.Get("project_id").(string), + ProviderID: d.Get("provider_id").(string), + Name: d.Get("name").(string), + Facility: d.Get("facility").(string), + ProviderPayload: d.Get("provider_payload").(string), + VLAN: d.Get("vxlan").(int), + PortSpeed: d.Get("port_speed").(int), + Description: d.Get("name").(string), + Tags: []string{d.Get("name").(string)}, + } + + pc, _, err := c.Connects.Create(createRequest) + if err != nil { + return friendlyError(err) + } + d.SetId(pc.ID) + _, err = waitForConnectStatus(d, "PROVISIONED", "PROVISIONING", meta) + if err != nil { + return friendlyError(err) + } + return resourcePacketConnectRead(d, meta) +} + +func resourcePacketConnectRead(d *schema.ResourceData, meta interface{}) error { + c := meta.(*packngo.Client) + pc, _, err := c.Connects.Get(d.Id(), d.Get("project_id").(string), nil) + if err != nil { + return friendlyError(err) + } + d.Set("name", pc.Name) + d.Set("provider_id", pc.ProviderID) + d.Set("provider_payload", pc.ProviderPayload) + d.Set("status", pc.Status) + d.Set("port_speed", pc.PortSpeed) + d.Set("vxlan", pc.VLAN) + + return nil +} + +func resourcePacketConnectDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + pc, _, err := client.Connects.Deprovision(d.Id(), d.Get("project_id").(string), false) + if err != nil { + return friendlyError(err) + } + _, err = waitForConnectStatus(d, "DEPROVISIONED", "DEPROVISIONING", meta) + if err != nil { + return friendlyError(err) + } + + _, err = client.Connects.Delete(d.Id(), pc.ProjectID) + if err != nil { + return friendlyError(err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_device.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_device.go new file mode 100644 index 00000000000..8af251c73bd --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_device.go @@ -0,0 +1,652 @@ +package packet + +import ( + "errors" + "fmt" + "path" + "path/filepath" + "reflect" + "regexp" + "sort" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/structure" + "github.com/hashicorp/terraform/helper/validation" + "github.com/packethost/packngo" +) + +var matchIPXEScript = regexp.MustCompile(`(?i)^#![i]?pxe`) + +func resourcePacketDevice() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketDeviceCreate, + Read: resourcePacketDeviceRead, + Update: resourcePacketDeviceUpdate, + Delete: resourcePacketDeviceDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "hostname": { + Type: schema.TypeString, + Required: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + }, + + "operating_system": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "facility": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateFacilityForDevice, + 
Deprecated: "Use the 'facilities' array instead.", + ConflictsWith: []string{"facilities"}, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // ignore set of empty facility "" => "xxx1" + if new == "" { + return true + } + return false + }, + }, + + "facilities": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ConflictsWith: []string{"facility"}, + }, + + "plan": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "billing_cycle": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + + "root_password": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + + "locked": { + Type: schema.TypeBool, + Computed: true, + }, + + "access_public_ipv6": { + Type: schema.TypeString, + Computed: true, + }, + + "access_public_ipv4": { + Type: schema.TypeString, + Computed: true, + }, + + "access_private_ipv4": { + Type: schema.TypeString, + Computed: true, + }, + + "network_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"layer3", "layer2-bonded", "layer2-individual", "hybrid"}, false), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new == "" { + return true + } + return false + }, + }, + + "ports": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "mac": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "bonded": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + + "network": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + }, + + "gateway": { + Type: schema.TypeString, + Computed: true, + }, + + "family": { + Type: schema.TypeInt, + Computed: true, + }, + + "cidr": { + Type: schema.TypeInt, + Computed: true, + }, + + "public": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + + "created": { + Type: schema.TypeString, + Computed: true, + }, + + "updated": { + Type: schema.TypeString, + Computed: true, + }, + + "user_data": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + }, + + "public_ipv4_subnet_size": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + }, + + "ipxe_script_url": { + Type: schema.TypeString, + Optional: true, + }, + + "always_pxe": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "hardware_reservation_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new == "next-available" && len(old) > 0 { + return true + } + return false + }, + }, + + "tags": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "storage": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + StateFunc: func(v interface{}) string { + s, _ := structure.NormalizeJsonString(v) + return s + }, + ValidateFunc: validation.ValidateJsonString, + }, + 
"project_ssh_key_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "ssh_key_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourcePacketDeviceCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + var facs []string + f, ok := d.GetOk("facility") + + if ok { + facs = []string{f.(string)} + } else { + facs = convertStringArr(d.Get("facilities").([]interface{})) + if len(facs) == 0 { + return fmt.Errorf("You must set either 'facilities' or 'facility'") + } + } + + createRequest := &packngo.DeviceCreateRequest{ + Hostname: d.Get("hostname").(string), + Plan: d.Get("plan").(string), + Facility: facs, + OS: d.Get("operating_system").(string), + BillingCycle: d.Get("billing_cycle").(string), + ProjectID: d.Get("project_id").(string), + PublicIPv4SubnetSize: d.Get("public_ipv4_subnet_size").(int), + } + targetNetworkState, nTypeOk := d.GetOk("network_type") + if attr, ok := d.GetOk("user_data"); ok { + createRequest.UserData = attr.(string) + } + + if attr, ok := d.GetOk("ipxe_script_url"); ok { + createRequest.IPXEScriptURL = attr.(string) + } + + if attr, ok := d.GetOk("hardware_reservation_id"); ok { + createRequest.HardwareReservationID = attr.(string) + } + + if createRequest.OS == "custom_ipxe" { + if createRequest.IPXEScriptURL == "" && createRequest.UserData == "" { + return friendlyError(errors.New("\"ipxe_script_url\" or \"user_data\"" + + " must be provided when \"custom_ipxe\" OS is selected.")) + } + + // ipxe_script_url + user_data is OK, unless user_data is an ipxe script in + // which case it's an error. + if createRequest.IPXEScriptURL != "" { + if matchIPXEScript.MatchString(createRequest.UserData) { + return friendlyError(errors.New("\"user_data\" should not be an iPXE " + + "script when \"ipxe_script_url\" is also provided.")) + } + } + } + + if createRequest.OS != "custom_ipxe" && createRequest.IPXEScriptURL != "" { + return friendlyError(errors.New("\"ipxe_script_url\" argument provided, but" + + " OS is not \"custom_ipxe\". Please verify and fix device arguments.")) + } + + if attr, ok := d.GetOk("always_pxe"); ok { + createRequest.AlwaysPXE = attr.(bool) + } + + projectKeys := d.Get("project_ssh_key_ids.#").(int) + if projectKeys > 0 { + createRequest.ProjectSSHKeys = convertStringArr(d.Get("project_ssh_key_ids").([]interface{})) + } + + tags := d.Get("tags.#").(int) + if tags > 0 { + createRequest.Tags = convertStringArr(d.Get("tags").([]interface{})) + } + + if attr, ok := d.GetOk("storage"); ok { + s, err := structure.NormalizeJsonString(attr.(string)) + if err != nil { + return errwrap.Wrapf("storage param contains invalid JSON: {{err}}", err) + } + createRequest.Storage = s + } + + newDevice, _, err := client.Devices.Create(createRequest) + if err != nil { + return friendlyError(err) + } + + d.SetId(newDevice.ID) + + // Wait for the device so we can get the networking attributes that show up after a while. + _, err = waitForDeviceAttribute(d, "active", []string{"queued", "provisioning"}, "state", meta) + if err != nil { + if isForbidden(err) { + // If the device doesn't get to the active state, we can't recover it from here. 
+ d.SetId("") + + return errors.New("provisioning time limit exceeded; the Packet team will investigate") + } + return err + } + + if nTypeOk { + _, err = waitForDeviceAttribute(d, "layer3", []string{"hybrid", "layer2-bonded", "layer2-individual"}, "network_type", meta) + + tns := targetNetworkState.(string) + if tns != "layer3" { + _, err := client.DevicePorts.DeviceToNetworkType(newDevice.ID, tns) + if err != nil { + return err + } + } + } + + return resourcePacketDeviceRead(d, meta) +} + +type NetworkInfo struct { + Networks []map[string]interface{} + IPv4SubnetSize int + Host string + PublicIPv4 string + PublicIPv6 string + PrivateIPv4 string +} + +func getNetworkInfo(ips []*packngo.IPAddressAssignment) NetworkInfo { + ni := NetworkInfo{Networks: make([]map[string]interface{}, 0, 1)} + for _, ip := range ips { + network := map[string]interface{}{ + "address": ip.Address, + "gateway": ip.Gateway, + "family": ip.AddressFamily, + "cidr": ip.CIDR, + "public": ip.Public, + } + ni.Networks = append(ni.Networks, network) + + // Initial device IPs are fixed and marked as "Management" + if ip.Management { + if ip.AddressFamily == 4 { + if ip.Public { + ni.Host = ip.Address + ni.IPv4SubnetSize = ip.CIDR + ni.PublicIPv4 = ip.Address + } else { + ni.PrivateIPv4 = ip.Address + } + } else { + ni.PublicIPv6 = ip.Address + } + } + } + return ni +} + +func resourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + device, _, err := client.Devices.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project"}}) + if err != nil { + err = friendlyError(err) + + // If the device somehow already destroyed, mark as succesfully gone. + if isNotFound(err) { + d.SetId("") + return nil + } + + return err + } + + d.Set("hostname", device.Hostname) + d.Set("plan", device.Plan.Slug) + d.Set("facility", device.Facility.Code) + d.Set("operating_system", device.OS.Slug) + d.Set("state", device.State) + d.Set("billing_cycle", device.BillingCycle) + d.Set("locked", device.Locked) + d.Set("created", device.Created) + d.Set("updated", device.Updated) + d.Set("ipxe_script_url", device.IPXEScriptURL) + d.Set("always_pxe", device.AlwaysPXE) + d.Set("root_password", device.RootPassword) + d.Set("project_id", device.Project.ID) + storageString, err := structure.FlattenJsonToString(device.Storage) + if err != nil { + return fmt.Errorf("[ERR] Error getting storage JSON string for device (%s): %s", d.Id(), err) + } + d.Set("storage", storageString) + + if len(device.HardwareReservation.Href) > 0 { + d.Set("hardware_reservation_id", path.Base(device.HardwareReservation.Href)) + } + d.Set("network_type", device.NetworkType) + + d.Set("tags", device.Tags) + keyIDs := []string{} + for _, k := range device.SSHKeys { + keyIDs = append(keyIDs, filepath.Base(k.URL)) + } + d.Set("ssh_key_ids", keyIDs) + networkInfo := getNetworkInfo(device.Network) + + sort.SliceStable(networkInfo.Networks, func(i, j int) bool { + famI := networkInfo.Networks[i]["family"].(int) + famJ := networkInfo.Networks[j]["family"].(int) + pubI := networkInfo.Networks[i]["public"].(bool) + pubJ := networkInfo.Networks[j]["public"].(bool) + return getNetworkRank(famI, pubI) < getNetworkRank(famJ, pubJ) + }) + + d.Set("network", networkInfo.Networks) + d.Set("public_ipv4_subnet_size", networkInfo.IPv4SubnetSize) + d.Set("access_public_ipv4", networkInfo.PublicIPv4) + d.Set("access_private_ipv4", networkInfo.PrivateIPv4) + d.Set("access_public_ipv6", networkInfo.PublicIPv6) + + ports := getPorts(device.NetworkPorts) 
+ d.Set("ports", ports) + + if networkInfo.Host != "" { + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": networkInfo.Host, + }) + } + + return nil +} + +func getNetworkRank(family int, public bool) int { + switch { + case family == 4 && public: + return 0 + case family == 6: + return 1 + case family == 4 && public: + return 2 + } + return 3 +} + +func getPorts(ps []packngo.Port) []map[string]interface{} { + ret := make([]map[string]interface{}, 0, 1) + for _, p := range ps { + port := map[string]interface{}{ + "name": p.Name, + "id": p.ID, + "type": p.Type, + "mac": p.Data.MAC, + "bonded": p.Data.Bonded, + } + ret = append(ret, port) + } + return ret +} + +func resourcePacketDeviceUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + if d.HasChange("locked") { + var action func(string) (*packngo.Response, error) + if d.Get("locked").(bool) { + action = client.Devices.Lock + } else { + action = client.Devices.Unlock + } + if _, err := action(d.Id()); err != nil { + return friendlyError(err) + } + } + ur := packngo.DeviceUpdateRequest{} + + if d.HasChange("description") { + dDesc := d.Get("description").(string) + ur.Description = &dDesc + } + if d.HasChange("hostname") { + dHostname := d.Get("hostname").(string) + ur.Hostname = &dHostname + } + if d.HasChange("tags") { + ts := d.Get("tags") + sts := []string{} + + switch ts.(type) { + case []interface{}: + for _, v := range ts.([]interface{}) { + sts = append(sts, v.(string)) + } + ur.Tags = &sts + default: + return friendlyError(fmt.Errorf("garbage in tags: %s", ts)) + } + } + if d.HasChange("ipxe_script_url") { + dUrl := d.Get("ipxe_script_url").(string) + ur.IPXEScriptURL = &dUrl + } + if d.HasChange("always_pxe") { + dPXE := d.Get("always_pxe").(bool) + ur.AlwaysPXE = &dPXE + } + if !reflect.DeepEqual(ur, packngo.DeviceUpdateRequest{}) { + if _, _, err := client.Devices.Update(d.Id(), &ur); err != nil { + return friendlyError(err) + } + + } + if d.HasChange("network_type") { + target, ok := d.GetOk("network_type") + if ok { + targetType := target.(string) + _, err := client.DevicePorts.DeviceToNetworkType(d.Id(), targetType) + if err != nil { + return err + } + } + } + return resourcePacketDeviceRead(d, meta) +} + +func resourcePacketDeviceDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + if _, err := client.Devices.Delete(d.Id()); err != nil { + return friendlyError(err) + } + + return nil +} + +func waitForDeviceAttribute(d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) { + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: []string{target}, + Refresh: newDeviceStateRefreshFunc(d, attribute, meta), + Timeout: 60 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + return stateConf.WaitForState() +} + +func newDeviceStateRefreshFunc(d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc { + client := meta.(*packngo.Client) + + return func() (interface{}, string, error) { + if err := resourcePacketDeviceRead(d, meta); err != nil { + return nil, "", err + } + + if attr, ok := d.GetOk(attribute); ok { + device, _, err := client.Devices.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project"}}) + if err != nil { + return nil, "", friendlyError(err) + } + return &device, attr.(string), nil + } + + return nil, "", nil + } +} + +// powerOnAndWait Powers on the device and waits for it to be active. 
+func powerOnAndWait(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + _, err := client.Devices.PowerOn(d.Id()) + if err != nil { + return friendlyError(err) + } + + _, err = waitForDeviceAttribute(d, "active", []string{"off"}, "state", client) + return err +} + +func validateFacilityForDevice(v interface{}, k string) (ws []string, errors []error) { + if v.(string) == "any" { + errors = append(errors, fmt.Errorf(`Cannot use facility: "any"`)) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ip_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ip_attachment.go new file mode 100644 index 00000000000..710f0db2198 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ip_attachment.go @@ -0,0 +1,102 @@ +package packet + +import ( + "fmt" + "log" + "path" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketIPAttachment() *schema.Resource { + ipAttachmentSchema := packetIPResourceComputedFields() + ipAttachmentSchema["device_id"] = &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + } + ipAttachmentSchema["cidr_notation"] = &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + } + return &schema.Resource{ + Create: resourcePacketIPAttachmentCreate, + Read: resourcePacketIPAttachmentRead, + Delete: resourcePacketIPAttachmentDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: ipAttachmentSchema, + } +} + +func resourcePacketIPAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + deviceID := d.Get("device_id").(string) + ipa := d.Get("cidr_notation").(string) + + req := packngo.AddressStruct{Address: ipa} + + assignment, _, err := client.DeviceIPs.Assign(deviceID, &req) + if err != nil { + return fmt.Errorf("error assigning address %s to device %s: %s", ipa, deviceID, err) + } + + d.SetId(assignment.ID) + + return resourcePacketIPAttachmentRead(d, meta) +} + +func resourcePacketIPAttachmentRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + assignment, _, err := client.DeviceIPs.Get(d.Id(), nil) + if err != nil { + err = friendlyError(err) + + // If the IP attachment was already destroyed, mark as succesfully gone. 
+ if isNotFound(err) { + log.Printf("[DEBUG] IP attachment %q not found or has been deleted", d.Id()) + d.SetId("") + return nil + } + return err + } + + d.SetId(assignment.ID) + d.Set("address", assignment.Address) + d.Set("gateway", assignment.Gateway) + d.Set("network", assignment.Network) + d.Set("netmask", assignment.Netmask) + d.Set("address_family", assignment.AddressFamily) + d.Set("cidr", assignment.CIDR) + d.Set("public", assignment.Public) + d.Set("management", assignment.Management) + d.Set("manageable", assignment.Manageable) + + g := false + if assignment.Global != nil { + g = *(assignment.Global) + } + d.Set("global", g) + + d.Set("device_id", path.Base(assignment.AssignedTo.Href)) + d.Set("cidr_notation", + fmt.Sprintf("%s/%d", assignment.Network, assignment.CIDR)) + + return nil +} + +func resourcePacketIPAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + _, err := client.DeviceIPs.Unassign(d.Id()) + if err != nil { + return friendlyError(err) + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_organization.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_organization.go new file mode 100644 index 00000000000..ac821e38eaa --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_organization.go @@ -0,0 +1,167 @@ +package packet + +import ( + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketOrganization() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketOrganizationCreate, + Read: resourcePacketOrganizationRead, + Update: resourcePacketOrganizationUpdate, + Delete: resourcePacketOrganizationDelete, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Required: false, + }, + + "website": { + Type: schema.TypeString, + Optional: true, + Required: false, + }, + + "twitter": { + Type: schema.TypeString, + Optional: true, + Required: false, + }, + + "logo": { + Type: schema.TypeString, + Optional: true, + Required: false, + }, + + "created": { + Type: schema.TypeString, + Computed: true, + }, + + "updated": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourcePacketOrganizationCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + createRequest := &packngo.OrganizationCreateRequest{ + Name: d.Get("name").(string), + } + + if attr, ok := d.GetOk("website"); ok { + createRequest.Website = attr.(string) + } + + if attr, ok := d.GetOk("description"); ok { + createRequest.Description = attr.(string) + } + + if attr, ok := d.GetOk("twitter"); ok { + createRequest.Twitter = attr.(string) + } + + if attr, ok := d.GetOk("logo"); ok { + createRequest.Logo = attr.(string) + } + + org, _, err := client.Organizations.Create(createRequest) + if err != nil { + return friendlyError(err) + } + + d.SetId(org.ID) + + return resourcePacketOrganizationRead(d, meta) +} + +func resourcePacketOrganizationRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + key, _, err := client.Organizations.Get(d.Id(), nil) + if err != nil { + err = friendlyError(err) + + // If the project somehow already destroyed, mark as succesfully gone. 
+ if isNotFound(err) { + d.SetId("") + + return nil + } + + return err + } + + d.Set("id", key.ID) + d.Set("name", key.Name) + d.Set("description", key.Description) + d.Set("website", key.Website) + d.Set("twitter", key.Twitter) + d.Set("logo", key.Logo) + d.Set("created", key.Created) + d.Set("updated", key.Updated) + + return nil +} + +func resourcePacketOrganizationUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + updateRequest := &packngo.OrganizationUpdateRequest{} + + if d.HasChange("name") { + oName := d.Get("name").(string) + updateRequest.Name = &oName + } + + if d.HasChange("description") { + oDescription := d.Get("description").(string) + updateRequest.Description = &oDescription + } + + if d.HasChange("website") { + oWebsite := d.Get("website").(string) + updateRequest.Website = &oWebsite + } + + if d.HasChange("twitter") { + oTwitter := d.Get("twitter").(string) + updateRequest.Twitter = &oTwitter + } + + if d.HasChange("logo") { + oLogo := d.Get("logo").(string) + updateRequest.Logo = &oLogo + } + _, _, err := client.Organizations.Update(d.Id(), updateRequest) + if err != nil { + return friendlyError(err) + } + + return resourcePacketOrganizationRead(d, meta) +} + +func resourcePacketOrganizationDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + _, err := client.Organizations.Delete(d.Id()) + if err != nil { + return friendlyError(err) + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_port_vlan_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_port_vlan_attachment.go new file mode 100644 index 00000000000..f1fa216cd70 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_port_vlan_attachment.go @@ -0,0 +1,171 @@ +package packet + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketPortVlanAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketPortVlanAttachmentCreate, + Read: resourcePacketPortVlanAttachmentRead, + Delete: resourcePacketPortVlanAttachmentDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "force_bond": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + "device_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "port_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "vlan_vnid": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "vlan_id": { + Type: schema.TypeString, + Computed: true, + }, + "port_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourcePacketPortVlanAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + deviceID := d.Get("device_id").(string) + pName := d.Get("port_name").(string) + vlanVNID := d.Get("vlan_vnid").(int) + + dev, _, err := client.Devices.Get(deviceID, &packngo.GetOptions{Includes: []string{"virtual_networks,project"}}) + if err != nil { + return err + } + + portFound := false + vlanFound := false + var port packngo.Port + for _, p := range dev.NetworkPorts { + if p.Name == pName { + portFound = true + port = p + for _, n := range p.AttachedVirtualNetworks { + if vlanVNID == 
n.VXLAN { + vlanFound = true + break + } + } + break + } + } + if !portFound { + return fmt.Errorf("Device %s doesn't have port %s", deviceID, pName) + } + if vlanFound { + log.Printf("Port %s already has VLAN %d assigned", pName, vlanVNID) + return nil + } + + vlanID := "" + facility := dev.Facility.Code + vlans, _, err := client.ProjectVirtualNetworks.List(dev.Project.ID, nil) + if err != nil { + return err + } + for _, n := range vlans.VirtualNetworks { + if (n.VXLAN == vlanVNID) && (n.FacilityCode == facility) { + vlanID = n.ID + } + } + if len(vlanID) == 0 { + return fmt.Errorf("VLAN with VNID %d doesn't exist in facilty %s", vlanVNID, facility) + } + + par := &packngo.PortAssignRequest{PortID: port.ID, VirtualNetworkID: vlanID} + + _, _, err = client.DevicePorts.Assign(par) + if err != nil { + return err + } + + d.SetId(port.ID + ":" + vlanID) + return resourcePacketPortVlanAttachmentRead(d, meta) +} + +func resourcePacketPortVlanAttachmentRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + deviceID := d.Get("device_id").(string) + pName := d.Get("port_name").(string) + vlanVNID := d.Get("vlan_vnid").(int) + + dev, _, err := client.Devices.Get(deviceID, &packngo.GetOptions{Includes: []string{"virtual_networks,project"}}) + if err != nil { + return err + } + portFound := false + vlanFound := false + portID := "" + vlanID := "" + for _, p := range dev.NetworkPorts { + if p.Name == pName { + portFound = true + portID = p.ID + for _, n := range p.AttachedVirtualNetworks { + if vlanVNID == n.VXLAN { + vlanFound = true + vlanID = n.ID + break + } + } + break + } + } + d.Set("port_id", portID) + d.Set("vlan_id", vlanID) + if !portFound { + return fmt.Errorf("Device %s doesn't have port %s", deviceID, pName) + } + if !vlanFound { + d.SetId(portID) + } + return nil +} + +func resourcePacketPortVlanAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + pID := d.Get("port_id").(string) + vlanID := d.Get("vlan_id").(string) + par := &packngo.PortAssignRequest{PortID: pID, VirtualNetworkID: vlanID} + client := meta.(*packngo.Client) + portPtr, _, err := client.DevicePorts.Unassign(par) + if err != nil { + return err + } + forceBond := d.Get("force_bond").(bool) + if forceBond && (len(portPtr.AttachedVirtualNetworks) == 0) { + _, _, err = client.DevicePorts.Bond(&packngo.BondRequest{PortID: pID, BulkEnable: false}) + if err != nil { + return friendlyError(err) + } + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project.go new file mode 100644 index 00000000000..83563888ce5 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project.go @@ -0,0 +1,281 @@ +package packet + +import ( + "fmt" + "path" + "regexp" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "github.com/packethost/packngo" +) + +var uuidRE = regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$") + +func resourcePacketProject() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketProjectCreate, + Read: resourcePacketProjectRead, + Update: resourcePacketProjectUpdate, + Delete: resourcePacketProjectDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: 
map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "created": { + Type: schema.TypeString, + Computed: true, + }, + + "updated": { + Type: schema.TypeString, + Computed: true, + }, + + "backend_transfer": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "payment_method_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return strings.ToLower(strings.Trim(old, `"`)) == strings.ToLower(strings.Trim(new, `"`)) + }, + ValidateFunc: validation.StringMatch(uuidRE, "must be a valid UUID"), + }, + + "organization_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return strings.ToLower(strings.Trim(old, `"`)) == strings.ToLower(strings.Trim(new, `"`)) + }, + ValidateFunc: validation.StringMatch(uuidRE, "must be a valid UUID"), + }, + "bgp_config": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deployment_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"local", "global"}, false), + }, + "asn": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + "md5": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "max_prefix": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func expandBGPConfig(d *schema.ResourceData) packngo.CreateBGPConfigRequest { + bgpCreateRequest := packngo.CreateBGPConfigRequest{ + DeploymentType: d.Get("bgp_config.0.deployment_type").(string), + Asn: d.Get("bgp_config.0.asn").(int), + } + md5, ok := d.GetOk("bgp_config.0.md5") + if ok { + bgpCreateRequest.Md5 = md5.(string) + } + + return bgpCreateRequest + +} + +func resourcePacketProjectCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + createRequest := &packngo.ProjectCreateRequest{ + Name: d.Get("name").(string), + OrganizationID: d.Get("organization_id").(string), + } + + project, _, err := client.Projects.Create(createRequest) + if err != nil { + return friendlyError(err) + } + + d.SetId(project.ID) + + _, hasBGPConfig := d.GetOk("bgp_config") + if hasBGPConfig { + bgpCR := expandBGPConfig(d) + _, err := client.BGPConfig.Create(project.ID, bgpCR) + if err != nil { + return friendlyError(err) + } + } + + backendTransfer := d.Get("backend_transfer").(bool) + if backendTransfer { + pur := packngo.ProjectUpdateRequest{BackendTransfer: &backendTransfer} + _, _, err := client.Projects.Update(project.ID, &pur) + if err != nil { + return friendlyError(err) + } + } + return resourcePacketProjectRead(d, meta) +} + +func resourcePacketProjectRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + proj, _, err := client.Projects.Get(d.Id(), nil) + if err != nil { + err = friendlyError(err) + + // If the project somehow already destroyed, mark as succesfully gone. 
+ if isNotFound(err) { + d.SetId("") + + return nil + } + + return err + } + + d.SetId(proj.ID) + d.Set("payment_method_id", path.Base(proj.PaymentMethod.URL)) + d.Set("name", proj.Name) + d.Set("organization_id", path.Base(proj.Organization.URL)) + d.Set("created", proj.Created) + d.Set("updated", proj.Updated) + d.Set("backend_transfer", proj.BackendTransfer) + + bgpConf, _, err := client.BGPConfig.Get(proj.ID, nil) + + if (err == nil) && (bgpConf != nil) { + // guard against an empty struct + if bgpConf.ID != "" { + err := d.Set("bgp_config", flattenBGPConfig(bgpConf)) + if err != nil { + err = friendlyError(err) + return err + } + } + } + return nil +} + +func flattenBGPConfig(l *packngo.BGPConfig) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + + if l == nil { + return nil + } + + r := make(map[string]interface{}) + + if l.Status != "" { + r["status"] = l.Status + } + if l.DeploymentType != "" { + r["deployment_type"] = l.DeploymentType + } + if l.Md5 != "" { + r["md5"] = l.Md5 + } + if l.Asn != 0 { + r["asn"] = l.Asn + } + if l.MaxPrefix != 0 { + r["max_prefix"] = l.MaxPrefix + } + + result = append(result, r) + + return result +} + +func resourcePacketProjectUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + updateRequest := &packngo.ProjectUpdateRequest{} + if d.HasChange("name") { + pName := d.Get("name").(string) + updateRequest.Name = &pName + } + if d.HasChange("payment_method_id") { + pPayment := d.Get("payment_method_id").(string) + updateRequest.PaymentMethodID = &pPayment + } + if d.HasChange("backend_transfer") { + pBT := d.Get("backend_transfer").(bool) + updateRequest.BackendTransfer = &pBT + } + if d.HasChange("bgp_config") { + o, n := d.GetChange("bgp_config") + oldarr := o.([]interface{}) + newarr := n.([]interface{}) + if len(newarr) == 1 { + bgpCreateRequest := expandBGPConfig(d) + _, err := client.BGPConfig.Create(d.Id(), bgpCreateRequest) + if err != nil { + return friendlyError(err) + } + } else { + if len(oldarr) == 1 { + m := oldarr[0].(map[string]interface{}) + + bgpConfStr := fmt.Sprintf( + "bgp_config {\n"+ + " deployment_type = \"%s\"\n"+ + " md5 = \"%s\"\n"+ + " asn = %d\n"+ + "}", m["deployment_type"].(string), m["md5"].(string), + m["asn"].(int)) + + errStr := fmt.Errorf("BGP Config can not be removed from a project, please add back\n%s", bgpConfStr) + return friendlyError(errStr) + } + } + } else { + _, _, err := client.Projects.Update(d.Id(), updateRequest) + if err != nil { + return friendlyError(err) + } + } + + return resourcePacketProjectRead(d, meta) +} + +func resourcePacketProjectDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + _, err := client.Projects.Delete(d.Id()) + if err != nil { + return friendlyError(err) + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project_ssh_key.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project_ssh_key.go new file mode 100644 index 00000000000..224ec28bc32 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project_ssh_key.go @@ -0,0 +1,54 @@ +package packet + +import ( + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketProjectSSHKey() *schema.Resource { + pkeySchema := packetSSHKeyCommonFields() + pkeySchema["project_id"] = &schema.Schema{ + Type: 
schema.TypeString, + ForceNew: true, + Required: true, + } + return &schema.Resource{ + Create: resourcePacketSSHKeyCreate, + Read: resourcePacketProjectSSHKeyRead, + Update: resourcePacketSSHKeyUpdate, + Delete: resourcePacketSSHKeyDelete, + + Schema: pkeySchema, + } +} + +func resourcePacketProjectSSHKeyRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + projectID := d.Get("project_id").(string) + projectKeys, _, err := client.SSHKeys.ProjectList(projectID) + if err != nil { + err = friendlyError(err) + if isNotFound(err) { + d.SetId("") + return nil + } + + return err + } + + keyFound := false + for _, k := range projectKeys { + if k.ID == d.Id() { + keyFound = true + d.Set("name", k.Label) + d.Set("public_key", k.Key) + d.Set("fingerprint", k.FingerPrint) + d.Set("created", k.Created) + d.Set("updated", k.Updated) + } + } + if !keyFound { + d.SetId("") + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_reserved_ip_block.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_reserved_ip_block.go new file mode 100644 index 00000000000..ae81abefff6 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_reserved_ip_block.go @@ -0,0 +1,234 @@ +package packet + +import ( + "fmt" + "path" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "github.com/packethost/packngo" +) + +func packetIPComputedFields() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + }, + "address_family": { + Type: schema.TypeInt, + Computed: true, + }, + "cidr": { + Type: schema.TypeInt, + Computed: true, + }, + "gateway": { + Type: schema.TypeString, + Computed: true, + }, + "netmask": { + Type: schema.TypeString, + Computed: true, + }, + "network": { + Type: schema.TypeString, + Computed: true, + }, + "manageable": { + Type: schema.TypeBool, + Computed: true, + }, + "management": { + Type: schema.TypeBool, + Computed: true, + }, + } +} + +func packetIPResourceComputedFields() map[string]*schema.Schema { + s := packetIPComputedFields() + s["address_family"] = &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + } + s["public"] = &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + } + s["global"] = &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + } + return s +} + +func resourcePacketReservedIPBlock() *schema.Resource { + reservedBlockSchema := packetIPResourceComputedFields() + reservedBlockSchema["project_id"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + } + reservedBlockSchema["facility"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + } + reservedBlockSchema["quantity"] = &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + } + reservedBlockSchema["type"] = &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Default: "public_ipv4", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"public_ipv4", "global_ipv4"}, false), + } + reservedBlockSchema["cidr_notation"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + + return &schema.Resource{ + Create: resourcePacketReservedIPBlockCreate, + Read: resourcePacketReservedIPBlockRead, + Delete: resourcePacketReservedIPBlockDelete, + Importer: &schema.ResourceImporter{ + State: 
schema.ImportStatePassthrough, + }, + + Schema: reservedBlockSchema, + } +} + +func resourcePacketReservedIPBlockCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + quantity := d.Get("quantity").(int) + typ := d.Get("type").(string) + + req := packngo.IPReservationRequest{ + Type: typ, + Quantity: quantity, + } + f, ok := d.GetOk("facility") + + if ok && typ == "global_ipv4" { + return fmt.Errorf("Facility can not be set for type == global_ipv4") + } + fs := f.(string) + if typ == "public_ipv4" { + req.Facility = &fs + } + + projectID := d.Get("project_id").(string) + + blockAddr, _, err := client.ProjectIPs.Request(projectID, &req) + if err != nil { + return fmt.Errorf("Error reserving IP address block: %s", err) + } + + d.Set("project_id", projectID) + d.SetId(blockAddr.ID) + + return resourcePacketReservedIPBlockRead(d, meta) +} + +func getGlobalBool(r *packngo.IPAddressReservation) bool { + if r.Global != nil { + return *(r.Global) + } + return false +} + +func getType(r *packngo.IPAddressReservation) (string, error) { + globalBool := getGlobalBool(r) + switch { + case !r.Public: + return fmt.Sprintf("private_ipv%d", r.AddressFamily), nil + case r.Public && !globalBool: + return fmt.Sprintf("public_ipv%d", r.AddressFamily), nil + case r.Public && globalBool: + return fmt.Sprintf("global_ipv%d", r.AddressFamily), nil + } + return "", fmt.Errorf("Unknown reservation type %+v", r) +} + +func loadBlock(d *schema.ResourceData, reservedBlock *packngo.IPAddressReservation) error { + ipv4CIDRToQuantity := map[int]int{32: 1, 31: 2, 30: 4, 29: 8, 28: 16, 27: 32, 26: 64, 25: 128, 24: 256} + + d.SetId(reservedBlock.ID) + d.Set("address", reservedBlock.Address) + if reservedBlock.Facility != nil { + d.Set("facility", reservedBlock.Facility.Code) + } + d.Set("gateway", reservedBlock.Gateway) + d.Set("network", reservedBlock.Network) + d.Set("netmask", reservedBlock.Netmask) + d.Set("address_family", reservedBlock.AddressFamily) + d.Set("cidr", reservedBlock.CIDR) + typ, err := getType(reservedBlock) + if err != nil { + return err + } + d.Set("type", typ) + d.Set("public", reservedBlock.Public) + d.Set("management", reservedBlock.Management) + d.Set("manageable", reservedBlock.Manageable) + if reservedBlock.AddressFamily == 4 { + d.Set("quantity", ipv4CIDRToQuantity[reservedBlock.CIDR]) + } else { + // In Packet, a reserved IPv6 block is allocated when a device is run in a project. + // It's always /56, and it can't be created with Terraform, only imported. + // The longest assignable prefix is /64, making it max 256 subnets per block. + // The following logic will hold as long as /64 is the smallest assignable subnet size. 
+ bits := 64 - reservedBlock.CIDR + if bits > 30 { + return fmt.Errorf("Strange (too small) CIDR prefix: %d", reservedBlock.CIDR) + } + d.Set("quantity", 1< 0 { + for i, f := range smr.Facilities { + facilityIDs[i] = f.ID + } + } + d.Set("project_id", smr.Project.ID) + + return nil +} + +func resourcePacketSpotMarketRequestDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + var waitForDevices bool + + if val, ok := d.GetOk("wait_for_devices"); ok { + waitForDevices = val.(bool) + } + if waitForDevices { + smr, _, err := client.SpotMarketRequests.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project", "devices", "facilities"}}) + if err != nil { + return nil + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"not_done"}, + Target: []string{"done"}, + Refresh: resourceStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutDelete), + MinTimeout: 5 * time.Second, + Delay: 3 * time.Second, // Wait 10 secs before starting + NotFoundChecks: 600, //Setting high number, to support long timeouts + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + for _, d := range smr.Devices { + _, err := client.Devices.Delete(d.ID) + if err != nil { + return err + } + } + } + _, err := client.SpotMarketRequests.Delete(d.Id(), true) + if err != nil { + return nil + } + return nil +} + +func resourceStateRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + client := meta.(*packngo.Client) + smr, _, err := client.SpotMarketRequests.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project", "devices", "facilities"}}) + + if err != nil { + return nil, "", err + + } + var finished bool + + for _, d := range smr.Devices { + + dev, _, _ := client.Devices.Get(d.ID, nil) + if dev.State != "active" { + break + } else { + finished = true + } + } + if finished { + return smr, "done", nil + } + return nil, "not_done", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ssh_key.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ssh_key.go new file mode 100644 index 00000000000..ff9de66672a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ssh_key.go @@ -0,0 +1,135 @@ +package packet + +import ( + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func packetSSHKeyCommonFields() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "public_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "fingerprint": { + Type: schema.TypeString, + Computed: true, + }, + + "created": { + Type: schema.TypeString, + Computed: true, + }, + + "updated": { + Type: schema.TypeString, + Computed: true, + }, + } + +} + +func resourcePacketSSHKey() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketSSHKeyCreate, + Read: resourcePacketSSHKeyRead, + Update: resourcePacketSSHKeyUpdate, + Delete: resourcePacketSSHKeyDelete, + + Schema: packetSSHKeyCommonFields(), + } +} + +func resourcePacketSSHKeyCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + createRequest := &packngo.SSHKeyCreateRequest{ + Label: d.Get("name").(string), + Key: d.Get("public_key").(string), + } + + projectID, isProjectKey := d.GetOk("project_id") + if 
isProjectKey { + createRequest.ProjectID = projectID.(string) + } + + key, _, err := client.SSHKeys.Create(createRequest) + if err != nil { + return friendlyError(err) + } + + d.SetId(key.ID) + if isProjectKey { + return resourcePacketProjectSSHKeyRead(d, meta) + } + + return resourcePacketSSHKeyRead(d, meta) +} + +func resourcePacketSSHKeyRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + key, _, err := client.SSHKeys.Get(d.Id(), nil) + if err != nil { + err = friendlyError(err) + + // If the key is somehow already destroyed, mark as + // succesfully gone + if isNotFound(err) { + d.SetId("") + return nil + } + + return err + } + + d.Set("id", key.ID) + d.Set("name", key.Label) + d.Set("public_key", key.Key) + d.Set("fingerprint", key.FingerPrint) + d.Set("created", key.Created) + d.Set("updated", key.Updated) + + return nil +} + +func resourcePacketSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + updateRequest := &packngo.SSHKeyUpdateRequest{} + + if d.HasChange("name") { + kName := d.Get("name").(string) + updateRequest.Label = &kName + } + + if d.HasChange("public_key") { + kKey := d.Get("public_key").(string) + updateRequest.Key = &kKey + } + + _, _, err := client.SSHKeys.Update(d.Id(), updateRequest) + if err != nil { + return friendlyError(err) + } + + return resourcePacketSSHKeyRead(d, meta) +} + +func resourcePacketSSHKeyDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + _, err := client.SSHKeys.Delete(d.Id()) + if err != nil { + return friendlyError(err) + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_vlan.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_vlan.go new file mode 100644 index 00000000000..0b3fc1df75d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_vlan.go @@ -0,0 +1,84 @@ +package packet + +import ( + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketVlan() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketVlanCreate, + Read: resourcePacketVlanRead, + Delete: resourcePacketVlanDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": { + Type: schema.TypeString, + Required: false, + Optional: true, + ForceNew: true, + }, + "facility": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "vxlan": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func resourcePacketVlanCreate(d *schema.ResourceData, meta interface{}) error { + c := meta.(*packngo.Client) + createRequest := &packngo.VirtualNetworkCreateRequest{ + ProjectID: d.Get("project_id").(string), + Description: d.Get("description").(string), + Facility: d.Get("facility").(string), + } + vlan, _, err := c.ProjectVirtualNetworks.Create(createRequest) + if err != nil { + return friendlyError(err) + } + d.SetId(vlan.ID) + return resourcePacketVlanRead(d, meta) +} + +func resourcePacketVlanRead(d *schema.ResourceData, meta interface{}) error { + c := meta.(*packngo.Client) + + vlan, _, err := c.ProjectVirtualNetworks.Get(d.Id(), + &packngo.GetOptions{Includes: []string{"assigned_to"}}) + if err != nil { + err = 
friendlyError(err) + if isNotFound(err) { + d.SetId("") + return nil + } + return err + + } + d.Set("description", vlan.Description) + d.Set("project_id", vlan.Project.ID) + d.Set("vxlan", vlan.VXLAN) + return nil +} + +func resourcePacketVlanDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + _, err := client.ProjectVirtualNetworks.Delete(d.Id()) + if err != nil { + return friendlyError(err) + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume.go new file mode 100644 index 00000000000..03464940a30 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume.go @@ -0,0 +1,312 @@ +package packet + +import ( + "errors" + "fmt" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketVolume() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketVolumeCreate, + Read: resourcePacketVolumeRead, + Update: resourcePacketVolumeUpdate, + Delete: resourcePacketVolumeDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": { + Type: schema.TypeString, + Computed: true, + }, + + "description": { + Type: schema.TypeString, + Required: false, + Optional: true, + }, + + "size": { + Type: schema.TypeInt, + Required: true, + }, + + "facility": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "plan": { + Type: schema.TypeString, + Required: true, + }, + + "billing_cycle": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + }, + + "locked": { + Type: schema.TypeBool, + Optional: true, + }, + + "snapshot_policies": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "snapshot_frequency": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "snapshot_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + "attachments": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "created": { + Type: schema.TypeString, + Computed: true, + }, + + "updated": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourcePacketVolumeCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + createRequest := &packngo.VolumeCreateRequest{ + PlanID: d.Get("plan").(string), + FacilityID: d.Get("facility").(string), + Size: d.Get("size").(int), + Locked: d.Get("locked").(bool), + } + + if attr, ok := d.GetOk("billing_cycle"); ok { + createRequest.BillingCycle = attr.(string) + } else { + createRequest.BillingCycle = "hourly" + } + + if attr, ok := d.GetOk("description"); ok { + createRequest.Description = attr.(string) + } + + snapshot_count := d.Get("snapshot_policies.#").(int) + if snapshot_count > 0 { + createRequest.SnapshotPolicies = make([]*packngo.SnapshotPolicy, 0, snapshot_count) + for i := 0; i < snapshot_count; i++ { + policy := 
new(packngo.SnapshotPolicy) + policy.SnapshotFrequency = d.Get(fmt.Sprintf("snapshot_policies.%d.snapshot_frequency", i)).(string) + policy.SnapshotCount = d.Get(fmt.Sprintf("snapshot_policies.%d.snapshot_count", i)).(int) + createRequest.SnapshotPolicies = append(createRequest.SnapshotPolicies, policy) + } + } + + newVolume, _, err := client.Volumes.Create(createRequest, d.Get("project_id").(string)) + if err != nil { + return friendlyError(err) + } + + d.SetId(newVolume.ID) + + _, err = waitForVolumeAttribute(d, "active", []string{"queued", "provisioning"}, "state", meta) + if err != nil { + if isForbidden(err) { + // If the volume doesn't get to the active state, we can't recover it from here. + d.SetId("") + + return errors.New("provisioning time limit exceeded; the Packet team will investigate") + } + return err + } + + return resourcePacketVolumeRead(d, meta) +} + +func waitForVolumeAttribute(d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) { + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: []string{target}, + Refresh: newVolumeStateRefreshFunc(d, attribute, meta), + Timeout: 60 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + return stateConf.WaitForState() +} + +func newVolumeStateRefreshFunc(d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc { + client := meta.(*packngo.Client) + + return func() (interface{}, string, error) { + if err := resourcePacketVolumeRead(d, meta); err != nil { + return nil, "", err + } + + if attr, ok := d.GetOk(attribute); ok { + volume, _, err := client.Volumes.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project", "snapshot_policies", "facility"}}) + if err != nil { + return nil, "", friendlyError(err) + } + return &volume, attr.(string), nil + } + + return nil, "", nil + } +} + +func resourcePacketVolumeRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + volume, _, err := client.Volumes.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project", "snapshot_policies", "facility"}}) + if err != nil { + err = friendlyError(err) + + // If the volume somehow already destroyed, mark as succesfully gone. 
+ if isNotFound(err) { + d.SetId("") + return nil + } + + return err + } + + d.Set("name", volume.Name) + d.Set("description", volume.Description) + d.Set("size", volume.Size) + d.Set("plan", volume.Plan.Slug) + d.Set("facility", volume.Facility.Code) + d.Set("state", volume.State) + d.Set("billing_cycle", volume.BillingCycle) + d.Set("locked", volume.Locked) + d.Set("created", volume.Created) + d.Set("updated", volume.Updated) + d.Set("project_id", volume.Project.ID) + + snapshot_policies := make([]map[string]interface{}, 0, len(volume.SnapshotPolicies)) + for _, snapshot_policy := range volume.SnapshotPolicies { + policy := map[string]interface{}{ + "snapshot_frequency": snapshot_policy.SnapshotFrequency, + "snapshot_count": snapshot_policy.SnapshotCount, + } + snapshot_policies = append(snapshot_policies, policy) + } + d.Set("snapshot_policies", snapshot_policies) + + attachments := make([]*packngo.VolumeAttachment, 0, len(volume.Attachments)) + for _, attachment := range volume.Attachments { + attachments = append(attachments, attachment) + } + d.Set("attachments", attachments) + + return nil +} + +func resourcePacketVolumeUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + if d.HasChange("locked") { + // the change is true => false, i.e. unlock + if !d.Get("locked").(bool) { + if _, err := client.Volumes.Unlock(d.Id()); err != nil { + return friendlyError(err) + } + } + } + + updateRequest := &packngo.VolumeUpdateRequest{} + + sendAttrUpdate := false + + if d.HasChange("description") { + sendAttrUpdate = true + vDesc := d.Get("description").(string) + updateRequest.Description = &vDesc + } + if d.HasChange("plan") { + sendAttrUpdate = true + vPlan := d.Get("plan").(string) + updateRequest.PlanID = &vPlan + } + if d.HasChange("size") { + sendAttrUpdate = true + vSize := d.Get("size").(int) + updateRequest.Size = &vSize + } + if d.HasChange("billing_cycle") { + sendAttrUpdate = true + vCycle := d.Get("billing_cycle").(string) + updateRequest.BillingCycle = &vCycle + } + + if sendAttrUpdate { + _, _, err := client.Volumes.Update(d.Id(), updateRequest) + if err != nil { + return friendlyError(err) + } + } + if d.HasChange("locked") { + // the change is false => true, i.e. 
lock + if d.Get("locked").(bool) { + if _, err := client.Volumes.Lock(d.Id()); err != nil { + return friendlyError(err) + } + } + } + + return resourcePacketVolumeRead(d, meta) +} + +func resourcePacketVolumeDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + if _, err := client.Volumes.Delete(d.Id()); err != nil { + return friendlyError(err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume_attachment.go new file mode 100644 index 00000000000..1316f793c09 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume_attachment.go @@ -0,0 +1,83 @@ +package packet + +import ( + "log" + "path/filepath" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketVolumeAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketVolumeAttachmentCreate, + Read: resourcePacketVolumeAttachmentRead, + Delete: resourcePacketVolumeAttachmentDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "device_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "volume_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourcePacketVolumeAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + dID := d.Get("device_id").(string) + vID := d.Get("volume_id").(string) + log.Printf("[DEBUG] Attaching Volume (%s) to Instance (%s)\n", vID, dID) + va, _, err := client.VolumeAttachments.Create(vID, dID) + if err != nil { + switch err.(type) { + case *packngo.ErrorResponse: + e := err.(*packngo.ErrorResponse) + if len(e.Errors) == 1 { + if e.Errors[0] == "Instance is already attached to this volume" { + log.Printf("[DEBUG] Volume (%s) is already attached to Instance (%s)", vID, dID) + break + } + } + } + return err + } + + d.SetId(va.ID) + return resourcePacketVolumeAttachmentRead(d, meta) +} + +func resourcePacketVolumeAttachmentRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + va, _, err := client.VolumeAttachments.Get(d.Id(), nil) + if err != nil { + err = friendlyError(err) + if isNotFound(err) { + d.SetId("") + return nil + } + return err + } + d.Set("device_id", filepath.Base(va.Device.Href)) + d.Set("volume_id", filepath.Base(va.Volume.Href)) + return nil +} + +func resourcePacketVolumeAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + _, err := client.VolumeAttachments.Delete(d.Id()) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/utils.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/utils.go new file mode 100644 index 00000000000..4f5bc569139 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/utils.go @@ -0,0 +1,21 @@ +package packet + +func contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} + +func convertStringArr(ifaceArr []interface{}) []string { + var arr []string + for _, v := range ifaceArr { + if v == nil { + continue + } + arr = append(arr, 
v.(string)) + } + return arr +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 71a8e17993a..7bc4bdea8e7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -779,12 +779,15 @@ github.com/hashicorp/terraform/configs/configupgrade github.com/hashicorp/terraform/configs/hcl2shim github.com/hashicorp/terraform/dag github.com/hashicorp/terraform/experiments +github.com/hashicorp/terraform/flatmap +github.com/hashicorp/terraform/helper/config github.com/hashicorp/terraform/helper/didyoumean github.com/hashicorp/terraform/helper/experiment github.com/hashicorp/terraform/helper/hashcode github.com/hashicorp/terraform/helper/hilmapstructure github.com/hashicorp/terraform/helper/logging github.com/hashicorp/terraform/helper/plugin +github.com/hashicorp/terraform/helper/resource github.com/hashicorp/terraform/helper/schema github.com/hashicorp/terraform/helper/slowmessage github.com/hashicorp/terraform/helper/structure @@ -1066,6 +1069,9 @@ github.com/ovirt/terraform-provider-ovirt/ovirt # github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db ## explicit github.com/packer-community/winrmcp/winrmcp +# github.com/packethost/packngo v0.2.0 +## explicit +github.com/packethost/packngo # github.com/pborman/uuid v1.2.0 ## explicit github.com/pborman/uuid @@ -1428,6 +1434,9 @@ github.com/terraform-providers/terraform-provider-local/local # github.com/terraform-providers/terraform-provider-openstack v1.28.0 ## explicit github.com/terraform-providers/terraform-provider-openstack/openstack +# github.com/terraform-providers/terraform-provider-packet v1.7.2 +## explicit +github.com/terraform-providers/terraform-provider-packet/packet # github.com/terraform-providers/terraform-provider-random v1.3.2-0.20190925210718-83518d96ae4f ## explicit github.com/terraform-providers/terraform-provider-random/random From c5741705ca3e27bf86cb642d89364300df8480a9 Mon Sep 17 00:00:00 2001 From: Marques Johansson Date: Wed, 24 Jun 2020 16:14:24 -0400 Subject: [PATCH 03/11] stubbing more Packet places Signed-off-by: Marques Johansson --- .gitignore | 2 + .../install.openshift.io_installconfigs.yaml | 17 +++++ data/data/rhcos-amd64.json | 7 ++ data/data/rhcos.json | 7 ++ pkg/asset/cluster/tfvars.go | 32 ++++++++ pkg/asset/installconfig/installconfig.go | 4 + pkg/asset/installconfig/packet/validation.go | 74 +++++++++++++++++++ pkg/asset/installconfig/platform.go | 7 ++ pkg/asset/installconfig/platformcredscheck.go | 7 ++ pkg/asset/installconfig/platformpermscheck.go | 3 +- .../installconfig/platformprovisioncheck.go | 3 +- pkg/tfvars/packet/OWNERS | 7 ++ pkg/tfvars/packet/packet.go | 13 ++++ pkg/types/clustermetadata.go | 5 ++ pkg/types/installconfig.go | 8 ++ pkg/types/machinepools.go | 10 ++- pkg/types/packet/machinepool.go | 30 ++++++++ pkg/types/packet/metadata.go | 5 ++ pkg/types/packet/validation/machinepool.go | 13 ++++ pkg/types/packet/validation/platform.go | 12 +++ 20 files changed, 262 insertions(+), 4 deletions(-) create mode 100644 pkg/asset/installconfig/packet/validation.go create mode 100644 pkg/tfvars/packet/OWNERS create mode 100644 pkg/types/packet/machinepool.go create mode 100644 pkg/types/packet/metadata.go diff --git a/.gitignore b/.gitignore index ae3c1726048..7237464fdf4 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,3 @@ /bin/ +.openshift_install.log +.vscode diff --git a/data/data/install.openshift.io_installconfigs.yaml b/data/data/install.openshift.io_installconfigs.yaml index 0aed19a3e75..0ca80a02a23 100644 --- a/data/data/install.openshift.io_installconfigs.yaml 
+++ b/data/data/install.openshift.io_installconfigs.yaml @@ -286,6 +286,12 @@ spec: - high_performance type: string type: object + packet: + description: Packet is the configuration used when installing + on Packet. + properties: {} + # TODO(displague) [properties] + type: object vsphere: description: VSphere is the configuration used when installing on vSphere. @@ -573,6 +579,12 @@ spec: - high_performance type: string type: object + packet: + description: Packet is the configuration used when installing on + Packet. + properties: {} + # TODO(displague) properties + type: object vsphere: description: VSphere is the configuration used when installing on vSphere. @@ -1433,6 +1445,11 @@ spec: - ovirt_cluster_id - ovirt_storage_domain_id type: object + packet: + description: Packet is the configuration used when installing on Packet + properties: {} + # TODO(displague) properties + type: object vsphere: description: VSphere is the configuration used when installing on vSphere. diff --git a/data/data/rhcos-amd64.json b/data/data/rhcos-amd64.json index 9ab37fb874c..e010e1e0ad5 100644 --- a/data/data/rhcos-amd64.json +++ b/data/data/rhcos-amd64.json @@ -133,6 +133,13 @@ "sha256": "e6b570a3559b76ca7350ed5865ebb799bb769a11883e38129326c995603e3aca", "size": 838369280 }, + "packet": { + "path": "rhcos-45.81.202005200134-0-metal.x86_64.raw.gz", + "sha256": "fbbe3f1a6cd60ec0344ca88925efc812c1556654680d8663bffd0663ae55843a", + "size": 878855135, + "uncompressed-sha256": "4a211425bf1af046ffbd91c9c63b0d179db67f9dc4165a4a106a2f0a1c74df7e", + "uncompressed-size": 3807379456 + }, "qemu": { "path": "rhcos-46.82.202007051540-0-qemu.x86_64.qcow2.gz", "sha256": "d74174df0c5813d0eaf5d31742504b775222385810dc8bf90b7a6b4af6c4b5fb", diff --git a/data/data/rhcos.json b/data/data/rhcos.json index 9ab37fb874c..e010e1e0ad5 100644 --- a/data/data/rhcos.json +++ b/data/data/rhcos.json @@ -133,6 +133,13 @@ "sha256": "e6b570a3559b76ca7350ed5865ebb799bb769a11883e38129326c995603e3aca", "size": 838369280 }, + "packet": { + "path": "rhcos-45.81.202005200134-0-metal.x86_64.raw.gz", + "sha256": "fbbe3f1a6cd60ec0344ca88925efc812c1556654680d8663bffd0663ae55843a", + "size": 878855135, + "uncompressed-sha256": "4a211425bf1af046ffbd91c9c63b0d179db67f9dc4165a4a106a2f0a1c74df7e", + "uncompressed-size": 3807379456 + }, "qemu": { "path": "rhcos-46.82.202007051540-0-qemu.x86_64.qcow2.gz", "sha256": "d74174df0c5813d0eaf5d31742504b775222385810dc8bf90b7a6b4af6c4b5fb", diff --git a/pkg/asset/cluster/tfvars.go b/pkg/asset/cluster/tfvars.go index 328625aa204..d8b6551e782 100644 --- a/pkg/asset/cluster/tfvars.go +++ b/pkg/asset/cluster/tfvars.go @@ -19,6 +19,8 @@ import ( azureprovider "sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1" openstackprovider "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" + // TODO(displague) This is moving to sigs.k8s.io + "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/ignition" "github.com/openshift/installer/pkg/asset/ignition/bootstrap" @@ -40,6 +42,7 @@ import ( libvirttfvars "github.com/openshift/installer/pkg/tfvars/libvirt" openstacktfvars "github.com/openshift/installer/pkg/tfvars/openstack" ovirttfvars "github.com/openshift/installer/pkg/tfvars/ovirt" + packettfvars "github.com/openshift/installer/pkg/tfvars/packet" vspheretfvars "github.com/openshift/installer/pkg/tfvars/vsphere" "github.com/openshift/installer/pkg/types" "github.com/openshift/installer/pkg/types/aws" @@ -50,6 +53,7 @@ import ( 
"github.com/openshift/installer/pkg/types/none" "github.com/openshift/installer/pkg/types/openstack" "github.com/openshift/installer/pkg/types/ovirt" + "github.com/openshift/installer/pkg/types/packet" "github.com/openshift/installer/pkg/types/vsphere" ) @@ -483,6 +487,34 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { Filename: fmt.Sprintf(TfPlatformVarsFileName, platform), Data: data, }) + case packet.Name: + /* + config, err := packetconfig.NewConfig() + if err != nil { + return err + } + con, err := packetconfig.NewConnection() + if err != nil { + return err + } + */ + // TODO(displague) Packet networking + + /* + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + */ + + data, err := packettfvars.TFVars(packettfvars.TFVarsSources{}) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: fmt.Sprintf(TfPlatformVarsFileName, platform), + Data: data, + }) case vsphere.Name: controlPlanes, err := mastersAsset.Machines() if err != nil { diff --git a/pkg/asset/installconfig/installconfig.go b/pkg/asset/installconfig/installconfig.go index 95587806696..4175fbb0f6d 100644 --- a/pkg/asset/installconfig/installconfig.go +++ b/pkg/asset/installconfig/installconfig.go @@ -15,6 +15,7 @@ import ( icgcp "github.com/openshift/installer/pkg/asset/installconfig/gcp" icopenstack "github.com/openshift/installer/pkg/asset/installconfig/openstack" icovirt "github.com/openshift/installer/pkg/asset/installconfig/ovirt" + icpacket "github.com/openshift/installer/pkg/asset/installconfig/packet" icvsphere "github.com/openshift/installer/pkg/asset/installconfig/vsphere" "github.com/openshift/installer/pkg/types" "github.com/openshift/installer/pkg/types/conversion" @@ -84,6 +85,7 @@ func (a *InstallConfig) Generate(parents asset.Parents) error { a.Config.GCP = platform.GCP a.Config.BareMetal = platform.BareMetal a.Config.Ovirt = platform.Ovirt + a.Config.Packet = platform.Packet return a.finish("") } @@ -186,6 +188,8 @@ func (a *InstallConfig) platformValidation() error { } if a.Config.Platform.OpenStack != nil { return icopenstack.Validate(a.Config) + if a.Config.Platform.Packet != nil { + return icpacket.Validate(a.Config) } return field.ErrorList{}.ToAggregate() } diff --git a/pkg/asset/installconfig/packet/validation.go b/pkg/asset/installconfig/packet/validation.go new file mode 100644 index 00000000000..1e6255bf3c2 --- /dev/null +++ b/pkg/asset/installconfig/packet/validation.go @@ -0,0 +1,74 @@ +package packet + +import ( + packngo "github.com/packethost/packngo" + "github.com/pkg/errors" + "gopkg.in/AlecAivazis/survey.v1" + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/packet" + "github.com/openshift/installer/pkg/types/packet/validation" +) + +// Validate executes packet specific validation +func Validate(ic *types.InstallConfig) error { + allErrs := field.ErrorList{} + packetPlatformPath := field.NewPath("platform", "packet") + + if ic.Platform.Packet == nil { + return errors.New(field.Required( + packetPlatformPath, + "validation requires a Engine platform configuration").Error()) + } + + allErrs = append( + allErrs, + validation.ValidatePlatform(ic.Platform.Packet, packetPlatformPath)...) 
+ + con, err := packngo.NewClient() + if err != nil { + return err + } + + // TODO(displague) validate networks + + if ic.ControlPlane != nil && ic.ControlPlane.Platform.Packet != nil { + allErrs = append( + allErrs, + validateMachinePool(con, field.NewPath("controlPlane", "platform", "packet"), ic.ControlPlane.Platform.Packet)...) + } + for idx, compute := range ic.Compute { + fldPath := field.NewPath("compute").Index(idx) + if compute.Platform.Packet != nil { + allErrs = append( + allErrs, + validateMachinePool(con, fldPath.Child("platform", "packet"), compute.Platform.Packet)...) + } + } + + return allErrs.ToAggregate() +} + +func validateMachinePool(con *packngo.Client, child *field.Path, pool *packet.MachinePool) field.ErrorList { + allErrs := field.ErrorList{} + return allErrs +} + +// authenticated takes an packet platform and validates +// its connection to the API by establishing +// the connection and authenticating successfully. +// The API connection is closed in the end and must leak +// or be reused in any way. +func authenticated(c *Config) survey.Validator { + return func(val interface{}) error { + _, err := packngo.NewClient() + + if err != nil { + return errors.Errorf("failed to construct connection to Engine platform %s", err) + } + + return nil + } + +} diff --git a/pkg/asset/installconfig/platform.go b/pkg/asset/installconfig/platform.go index e542efa209c..e5416f76d04 100644 --- a/pkg/asset/installconfig/platform.go +++ b/pkg/asset/installconfig/platform.go @@ -14,6 +14,7 @@ import ( libvirtconfig "github.com/openshift/installer/pkg/asset/installconfig/libvirt" openstackconfig "github.com/openshift/installer/pkg/asset/installconfig/openstack" ovirtconfig "github.com/openshift/installer/pkg/asset/installconfig/ovirt" + packetconfig "github.com/openshift/installer/pkg/asset/installconfig/packet" vsphereconfig "github.com/openshift/installer/pkg/asset/installconfig/vsphere" "github.com/openshift/installer/pkg/types" "github.com/openshift/installer/pkg/types/aws" @@ -23,6 +24,7 @@ import ( "github.com/openshift/installer/pkg/types/none" "github.com/openshift/installer/pkg/types/openstack" "github.com/openshift/installer/pkg/types/ovirt" + "github.com/openshift/installer/pkg/types/packet" "github.com/openshift/installer/pkg/types/vsphere" ) @@ -79,6 +81,11 @@ func (a *platform) Generate(asset.Parents) error { if err != nil { return err } + case packet.Name: + a.Packet, err = packetconfig.Platform() + if err != nil { + return err + } case ovirt.Name: a.Ovirt, err = ovirtconfig.Platform() if err != nil { diff --git a/pkg/asset/installconfig/platformcredscheck.go b/pkg/asset/installconfig/platformcredscheck.go index 2fedfeb9e22..7db0503777c 100644 --- a/pkg/asset/installconfig/platformcredscheck.go +++ b/pkg/asset/installconfig/platformcredscheck.go @@ -10,6 +10,7 @@ import ( gcpconfig "github.com/openshift/installer/pkg/asset/installconfig/gcp" openstackconfig "github.com/openshift/installer/pkg/asset/installconfig/openstack" ovirtconfig "github.com/openshift/installer/pkg/asset/installconfig/ovirt" + packetconfig "github.com/openshift/installer/pkg/asset/installconfig/packet" "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/baremetal" @@ -18,6 +19,7 @@ import ( "github.com/openshift/installer/pkg/types/none" "github.com/openshift/installer/pkg/types/openstack" "github.com/openshift/installer/pkg/types/ovirt" + "github.com/openshift/installer/pkg/types/packet" 
"github.com/openshift/installer/pkg/types/vsphere" ) @@ -72,6 +74,11 @@ func (a *PlatformCredsCheck) Generate(dependencies asset.Parents) error { if err != nil { return errors.Wrap(err, "testing Engine connection") } + case packet.Name: + _, err := packetconfig.NewConnection() + if err != nil { + return errors.Wrap(err, "creating Engine connection") + } default: err = fmt.Errorf("unknown platform type %q", platform) } diff --git a/pkg/asset/installconfig/platformpermscheck.go b/pkg/asset/installconfig/platformpermscheck.go index bb9f199c882..6fcbbc3140d 100644 --- a/pkg/asset/installconfig/platformpermscheck.go +++ b/pkg/asset/installconfig/platformpermscheck.go @@ -17,6 +17,7 @@ import ( "github.com/openshift/installer/pkg/types/none" "github.com/openshift/installer/pkg/types/openstack" "github.com/openshift/installer/pkg/types/ovirt" + "github.com/openshift/installer/pkg/types/packet" "github.com/openshift/installer/pkg/types/vsphere" ) @@ -72,7 +73,7 @@ func (a *PlatformPermsCheck) Generate(dependencies asset.Parents) error { if err = gcpconfig.ValidateEnabledServices(ctx, client, ic.Config.GCP.ProjectID); err != nil { return errors.Wrap(err, "failed to validate services in this project") } - case azure.Name, baremetal.Name, libvirt.Name, none.Name, openstack.Name, ovirt.Name, vsphere.Name: + case azure.Name, baremetal.Name, libvirt.Name, none.Name, openstack.Name, ovirt.Name, packet.Name, vsphere.Name: // no permissions to check default: err = fmt.Errorf("unknown platform type %q", platform) diff --git a/pkg/asset/installconfig/platformprovisioncheck.go b/pkg/asset/installconfig/platformprovisioncheck.go index 1a311a87b77..0835e058847 100644 --- a/pkg/asset/installconfig/platformprovisioncheck.go +++ b/pkg/asset/installconfig/platformprovisioncheck.go @@ -17,6 +17,7 @@ import ( "github.com/openshift/installer/pkg/types/none" "github.com/openshift/installer/pkg/types/openstack" "github.com/openshift/installer/pkg/types/ovirt" + "github.com/openshift/installer/pkg/types/packet" "github.com/openshift/installer/pkg/types/vsphere" ) @@ -70,7 +71,7 @@ func (a *PlatformProvisionCheck) Generate(dependencies asset.Parents) error { if err != nil { return err } - case aws.Name, libvirt.Name, none.Name, openstack.Name, ovirt.Name: + case aws.Name, libvirt.Name, none.Name, openstack.Name, ovirt.Name, packet.Name: // no special provisioning requirements to check default: err = fmt.Errorf("unknown platform type %q", platform) diff --git a/pkg/tfvars/packet/OWNERS b/pkg/tfvars/packet/OWNERS new file mode 100644 index 00000000000..8877fbf8d6d --- /dev/null +++ b/pkg/tfvars/packet/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. 
+ +approvers: + - packet-approvers +reviewers: + - packet-reviewers diff --git a/pkg/tfvars/packet/packet.go b/pkg/tfvars/packet/packet.go index e69de29bb2d..81d8c0704e5 100644 --- a/pkg/tfvars/packet/packet.go +++ b/pkg/tfvars/packet/packet.go @@ -0,0 +1,13 @@ +package packet + +import "encoding/json" + +// TFVarsSources contains the parameters to be converted into Terraform variables +type TFVarsSources struct { +} + +//TFVars generate Packet-specific Terraform variables +func TFVars(sources TFVarsSources) ([]byte, error) { + // TODO(displague) fill in the tf vars + return json.MarshalIndent(struct{}{}, "", " ") +} diff --git a/pkg/types/clustermetadata.go b/pkg/types/clustermetadata.go index 31084ffcdeb..2500d6da262 100644 --- a/pkg/types/clustermetadata.go +++ b/pkg/types/clustermetadata.go @@ -8,6 +8,7 @@ import ( "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/openstack" "github.com/openshift/installer/pkg/types/ovirt" + "github.com/openshift/installer/pkg/types/packet" "github.com/openshift/installer/pkg/types/vsphere" ) @@ -32,6 +33,7 @@ type ClusterPlatformMetadata struct { GCP *gcp.Metadata `json:"gcp,omitempty"` BareMetal *baremetal.Metadata `json:"baremetal,omitempty"` Ovirt *ovirt.Metadata `json:"ovirt,omitempty"` + Packet *packet.Metadata `json:"packet,omitempty"` VSphere *vsphere.Metadata `json:"vsphere,omitempty"` } @@ -63,6 +65,9 @@ func (cpm *ClusterPlatformMetadata) Platform() string { if cpm.Ovirt != nil { return ovirt.Name } + if cpm.Packet != nil { + return packet.Name + } if cpm.VSphere != nil { return vsphere.Name } diff --git a/pkg/types/installconfig.go b/pkg/types/installconfig.go index 5f6f4c085a1..b667dc70feb 100644 --- a/pkg/types/installconfig.go +++ b/pkg/types/installconfig.go @@ -12,6 +12,7 @@ import ( "github.com/openshift/installer/pkg/types/none" "github.com/openshift/installer/pkg/types/openstack" "github.com/openshift/installer/pkg/types/ovirt" + "github.com/openshift/installer/pkg/types/packet" "github.com/openshift/installer/pkg/types/vsphere" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,6 +34,7 @@ var ( gcp.Name, openstack.Name, ovirt.Name, + packet.Name, vsphere.Name, } // HiddenPlatformNames is a slice with all the @@ -176,6 +178,10 @@ type Platform struct { // +optional OpenStack *openstack.Platform `json:"openstack,omitempty"` + // Packet is the configuration used when installing on Packet. + // +optional + Packet *packet.Platform `json:"packet,omitempty"` + // VSphere is the configuration used when installing on vSphere. // +optional VSphere *vsphere.Platform `json:"vsphere,omitempty"` @@ -206,6 +212,8 @@ func (p *Platform) Name() string { return none.Name case p.OpenStack != nil: return openstack.Name + case p.Packet != nil: + return packet.Name case p.VSphere != nil: return vsphere.Name case p.Ovirt != nil: diff --git a/pkg/types/machinepools.go b/pkg/types/machinepools.go index d385327516d..ae90e14df4b 100644 --- a/pkg/types/machinepools.go +++ b/pkg/types/machinepools.go @@ -8,6 +8,7 @@ import ( "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/openstack" "github.com/openshift/installer/pkg/types/ovirt" + "github.com/openshift/installer/pkg/types/packet" "github.com/openshift/installer/pkg/types/vsphere" ) @@ -90,6 +91,9 @@ type MachinePoolPlatform struct { // Ovirt is the configuration used when installing on oVirt. Ovirt *ovirt.MachinePool `json:"ovirt,omitempty"` + + // Packet is the configuration used when installing on Packet. 
+ Packet *packet.MachinePool `json:"packet,omitempty"` } // Name returns a string representation of the platform (e.g. "aws" if @@ -111,10 +115,12 @@ func (p *MachinePoolPlatform) Name() string { return libvirt.Name case p.OpenStack != nil: return openstack.Name - case p.VSphere != nil: - return vsphere.Name case p.Ovirt != nil: return ovirt.Name + case p.VSphere != nil: + return vsphere.Name + case p.Packet != nil: + return packet.Name default: return "" } diff --git a/pkg/types/packet/machinepool.go b/pkg/types/packet/machinepool.go new file mode 100644 index 00000000000..f518d6110d8 --- /dev/null +++ b/pkg/types/packet/machinepool.go @@ -0,0 +1,30 @@ +package packet + +// MachinePool stores the configuration for a machine pool installed +// on packet. +type MachinePool struct { +} + +// Disk defines a BM disk +type Disk struct { + // SizeGB size of the bootable disk in GiB. + SizeGB int64 `json:"sizeGB"` +} + +// CPU defines the BM cpu, made of (Sockets * Cores). +type CPU struct { + // Sockets is the number of sockets for a BM. + // Total CPUs is (Sockets * Cores) + Sockets int32 `json:"sockets"` + + // Cores is the number of cores per socket. + // Total CPUs is (Sockets * Cores) + Cores int32 `json:"cores"` +} + +// Set sets the values from `required` to `p`. +func (p *MachinePool) Set(required *MachinePool) { + if required == nil || p == nil { + return + } +} diff --git a/pkg/types/packet/metadata.go b/pkg/types/packet/metadata.go new file mode 100644 index 00000000000..7475843bede --- /dev/null +++ b/pkg/types/packet/metadata.go @@ -0,0 +1,5 @@ +package packet + +// Metadata contains packet metadata (e.g. for uninstalling the cluster). +type Metadata struct { +} diff --git a/pkg/types/packet/validation/machinepool.go b/pkg/types/packet/validation/machinepool.go index 958ae1a6226..32c26580e6f 100644 --- a/pkg/types/packet/validation/machinepool.go +++ b/pkg/types/packet/validation/machinepool.go @@ -1 +1,14 @@ package validation + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/openshift/installer/pkg/types/packet" +) + +// ValidateMachinePool checks that the specified machine pool is valid. +func ValidateMachinePool(p *packet.MachinePool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + return allErrs +} diff --git a/pkg/types/packet/validation/platform.go b/pkg/types/packet/validation/platform.go index 958ae1a6226..daf758f506e 100644 --- a/pkg/types/packet/validation/platform.go +++ b/pkg/types/packet/validation/platform.go @@ -1 +1,13 @@ package validation + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/openshift/installer/pkg/types/packet" +) + +// ValidatePlatform checks that the specified platform is valid. 
+func ValidatePlatform(p *packet.Platform, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + return allErrs +} From 5aceec6b98347b6cc2806b1a509e538dd8df32ff Mon Sep 17 00:00:00 2001 From: Marques Johansson Date: Fri, 10 Jul 2020 14:20:50 -0400 Subject: [PATCH 04/11] change packet config and platform shapes --- data/data/packet/bootstrap/versions.tf | 3 + data/data/packet/dns/versions.tf | 3 + data/data/packet/main.tf | 71 +++++++++++ data/data/packet/master/versions.tf | 3 + data/data/packet/variables-packet.tf | 102 ++++++++++++++++ data/data/packet/versions.tf | 3 + data/data/packet/vnet/versions.tf | 3 + pkg/asset/cluster/packet/OWNERS | 7 ++ pkg/asset/cluster/packet/packet.go | 13 ++ pkg/asset/installconfig/packet/client.go | 52 ++++++-- pkg/asset/installconfig/packet/config.go | 9 +- pkg/asset/installconfig/packet/packet.go | 111 ++++++++++++++++-- pkg/types/packet/platform.go | 6 +- .../api/config/v1/types_infrastructure.go | 24 ++++ 14 files changed, 389 insertions(+), 21 deletions(-) create mode 100644 pkg/asset/cluster/packet/OWNERS diff --git a/data/data/packet/bootstrap/versions.tf b/data/data/packet/bootstrap/versions.tf index e69de29bb2d..d9b6f790b92 100644 --- a/data/data/packet/bootstrap/versions.tf +++ b/data/data/packet/bootstrap/versions.tf @@ -0,0 +1,3 @@ +terraform { + required_version = ">= 0.12" +} diff --git a/data/data/packet/dns/versions.tf b/data/data/packet/dns/versions.tf index e69de29bb2d..d9b6f790b92 100644 --- a/data/data/packet/dns/versions.tf +++ b/data/data/packet/dns/versions.tf @@ -0,0 +1,3 @@ +terraform { + required_version = ">= 0.12" +} diff --git a/data/data/packet/main.tf b/data/data/packet/main.tf index e69de29bb2d..b2446cc4316 100644 --- a/data/data/packet/main.tf +++ b/data/data/packet/main.tf @@ -0,0 +1,71 @@ +provider "packet" { + auth_token = var.packet_auth_token +} + +provider "cloudflare" { + email = var.packet_cf_email + api_key = var.packet_cf_api_key +} + +module "bastion" { + + source = "./modules/bastion" + auth_token = var.auth_token + project_id = var.project_id + facility = var.facility + plan = var.plan_master + operating_system = var.bastion_operating_system + ssh_private_key_path = var.ssh_private_key_path + cluster_name = var.cluster_name + cluster_basedomain = var.cluster_basedomain + cf_zone_id = var.cf_zone_id + ocp_version = var.ocp_version + ocp_version_zstream = var.ocp_version_zstream + //depends = [module.prepare_openshift.finished] +} + +module "dns_lb" { + source = "./modules/dns" + + cluster_name = var.cluster_name + cluster_basedomain = var.cluster_basedomain + cf_zone_id = var.cf_zone_id + node_type = "lb" + node_ips = tolist([module.bastion.lb_ip]) +} + +module "prepare_openshift" { + + source = "./modules/prereq" + + cluster_name = var.cluster_name + cluster_basedomain = var.cluster_basedomain + ocp_version = var.ocp_version + count_master = var.count_master + count_compute = var.count_compute + ssh_public_key_path = var.ssh_public_key_path + ssh_private_key_path = var.ssh_private_key_path + bastion_ip = module.bastion.lb_ip + ocp_api_token = var.ocp_cluster_manager_token + depends = [module.bastion.finished] +} + +module "openshift_install" { + source = "./modules/install" + + ssh_private_key_path = var.ssh_private_key_path + operating_system = var.bastion_operating_system + bastion_ip = module.bastion.lb_ip + count_master = var.count_master + count_compute = var.count_compute + cluster_name = var.cluster_name + cluster_basedomain = var.cluster_basedomain + bootstrap_ip = 
module.openshift_bootstrap.node_ip + master_ips = module.openshift_masters.node_ip + worker_ips = module.openshift_workers.node_ip + depends = [module.openshift_masters.node_ip, module.openshift_workers.node_ip] + + ocp_storage_nfs_enable = var.ocp_storage_nfs_enable + ocp_storage_ocs_enable = var.ocp_storage_ocs_enable + ocp_virtualization_enable = var.ocp_virtualization_enable +} \ No newline at end of file diff --git a/data/data/packet/master/versions.tf b/data/data/packet/master/versions.tf index e69de29bb2d..d9b6f790b92 100644 --- a/data/data/packet/master/versions.tf +++ b/data/data/packet/master/versions.tf @@ -0,0 +1,3 @@ +terraform { + required_version = ">= 0.12" +} diff --git a/data/data/packet/variables-packet.tf b/data/data/packet/variables-packet.tf index e69de29bb2d..06436ccce0d 100644 --- a/data/data/packet/variables-packet.tf +++ b/data/data/packet/variables-packet.tf @@ -0,0 +1,102 @@ +variable "packet_cf_email" { + description = "Your Cloudflare email address" +} + +variable "packet_cf_api_key" { + description = "Your Cloudflare API key" +} + +variable "packet_cf_zone_id" { + description = "Your Cloudflare Zone" +} + +variable "packet_cluster_basedomain" { + description = "Your Cloudflare Base domain for your cluster" +} + + +variable "packet_auth_token" { + description = "Your Packet API key" +} + +variable "packet_project_id" { + description = "Your Packet Project ID" +} + +variable "packet_ssh_private_key_path" { + description = "Your SSH private key path (used locally only)" + default = "~/.ssh/id_rsa" +} + +variable "packet_ssh_public_key_path" { + description = "Your SSH public key path (used for install-config.yaml)" + default = "~/.ssh/id_rsa.pub" +} + +variable "packet_bastion_operating_system" { + description = "Your preferred bastion operating system (RHEL or CentOS)" + default = "rhel_7" +} + +variable "packet_facility" { + description = "Your primary facility" + default = "dfw2" +} + +variable "packet_plan_master" { + description = "Plan for Master Nodes" + default = "c3.medium.x86" +} + +variable "packet_plan_compute" { + description = "Plan for Compute Nodes" + default = "c2.medium.x86" +} + +variable "packet_count_bootstrap" { + default = "1" + description = "Number of Bootstrap Nodes." +} + +variable "packet_count_master" { + default = "3" + description = "Number of Master Nodes." +} + +variable "packet_count_compute" { + default = "2" + description = "Number of Compute Nodes" +} + +variable "packet_cluster_name" { + default = "jr" + description = "Cluster name label" +} + +variable "packet_ocp_version" { + default = "4.4" + description = "OpenShift minor release version" +} + +variable "packet_ocp_version_zstream" { + default = "3" + description = "OpenShift zstream version" +} + +variable "packet_ocp_cluster_manager_token" { + description = "OpenShift Cluster Manager API Token used to generate your pullSecret (https://cloud.redhat.com/openshift/token)" +} + +variable "packet_ocp_storage_nfs_enable" { + description = "Enable configuration of NFS and NFS-related k8s provisioner/storageClass" + default = true +} +variable "packet_ocp_storage_ocs_enable" { + description = "Enable installation of OpenShift Container Storage via operator. This requires a minimum of 3 worker nodes" + default = false +} + +variable "packet_ocp_virtualization_enable" { + description = "Enable installation of OpenShift Virtualization via operator.
This requires storage provided by OCS, NFS, and/or hostPath provisioner(s)" + default = false +} \ No newline at end of file diff --git a/data/data/packet/versions.tf b/data/data/packet/versions.tf index e69de29bb2d..d9b6f790b92 100644 --- a/data/data/packet/versions.tf +++ b/data/data/packet/versions.tf @@ -0,0 +1,3 @@ +terraform { + required_version = ">= 0.12" +} diff --git a/data/data/packet/vnet/versions.tf b/data/data/packet/vnet/versions.tf index e69de29bb2d..d9b6f790b92 100644 --- a/data/data/packet/vnet/versions.tf +++ b/data/data/packet/vnet/versions.tf @@ -0,0 +1,3 @@ +terraform { + required_version = ">= 0.12" +} diff --git a/pkg/asset/cluster/packet/OWNERS b/pkg/asset/cluster/packet/OWNERS new file mode 100644 index 00000000000..8877fbf8d6d --- /dev/null +++ b/pkg/asset/cluster/packet/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +approvers: + - packet-approvers +reviewers: + - packet-reviewers diff --git a/pkg/asset/cluster/packet/packet.go b/pkg/asset/cluster/packet/packet.go index e69de29bb2d..559b3b7f094 100644 --- a/pkg/asset/cluster/packet/packet.go +++ b/pkg/asset/cluster/packet/packet.go @@ -0,0 +1,13 @@ +// Package packet extracts packet metadata from install configurations. +package packet + +import ( + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/packet" +) + +// Metadata converts an install configuration to Packet metadata. +func Metadata(config *types.InstallConfig) *packet.Metadata { + m := packet.Metadata{} + return &m +} diff --git a/pkg/asset/installconfig/packet/client.go b/pkg/asset/installconfig/packet/client.go index 01a6ff9c62d..5fd3d498d61 100644 --- a/pkg/asset/installconfig/packet/client.go +++ b/pkg/asset/installconfig/packet/client.go @@ -1,19 +1,53 @@ package packet import ( + "context" + packngo "github.com/packethost/packngo" "github.com/pkg/errors" ) -// getConnection is a convenience method to get a connection to packet api -// form a Config Object. -func getConnection(_ Config) (*packngo.Client, error) { - // TODO(displague) NewClientWith... - con, err := packngo.NewClient() - if err != nil { - return nil, err - } - return con, nil +const ( + // PACKET_CONSUMER_TOKEN identifies the installer to the Packet API. + PACKET_CONSUMER_TOKEN = "redhat openshift ipi" +) + +//go:generate mockgen -source=./client.go -destination=mock/packet_generated.go -package=mock + +// API represents the calls made to the API. +type API interface { + ListProjects(ctx context.Context) ([]packngo.Project, error) + ListFacilities(ctx context.Context) ([]packngo.Facility, error) + ListPlans(ctx context.Context) ([]packngo.Plan, error) +} + +// Client implements API using a packngo connection scoped to an organization, facility, and project. +type Client struct { + OrganizationID string + FacilityID string + ProjectID string + + Conn *packngo.Client +} + +// ListProjects lists the Packet projects visible to the configured credentials (stub, not yet implemented). +func (c *Client) ListProjects(ctx context.Context) ([]packngo.Project, error) { + return nil, nil +} + +// ListFacilities lists the available Packet facilities (stub, not yet implemented). +func (c *Client) ListFacilities(ctx context.Context) ([]packngo.Facility, error) { + return nil, nil +} + +// ListPlans lists the available Packet device plans (stub, not yet implemented). +func (c *Client) ListPlans(ctx context.Context) ([]packngo.Plan, error) { + return nil, nil +} + +var _ API = &Client{} + +// getConnection is a convenience method to get a Packet API client +// from a Config object. +func getConnection(c Config) (*packngo.Client, error) { + return packngo.NewClientWithBaseURL( + PACKET_CONSUMER_TOKEN, c.APIKey, nil, c.APIURL, + ) } // NewConnection returns a new client connection to Packet's API endpoint.
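(Not part of the patch: a minimal sketch of how the helpers above are meant to compose once the `List*` stubs are implemented. It assumes it lives in the same `packet` package; the `Config`, `getConnection`, and `Client` names are the ones introduced in this hunk, and `listProjectNames` is a hypothetical caller.)

```go
package packet

import (
	"context"
	"time"

	"github.com/pkg/errors"
)

// listProjectNames shows the intended flow: build a packngo connection from a
// Config, wrap it in the Client that satisfies the API interface, and query it.
func listProjectNames(cfg Config) ([]string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	conn, err := getConnection(cfg)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create Packet connection")
	}

	client := &Client{Conn: conn}
	projects, err := client.ListProjects(ctx) // returns nil until the stub is filled in
	if err != nil {
		return nil, errors.Wrap(err, "failed to list Packet projects")
	}

	names := make([]string, 0, len(projects))
	for _, p := range projects {
		names = append(names, p.Name)
	}
	return names, nil
}
```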
diff --git a/pkg/asset/installconfig/packet/config.go b/pkg/asset/installconfig/packet/config.go index e91035673a8..e6782e72c36 100644 --- a/pkg/asset/installconfig/packet/config.go +++ b/pkg/asset/installconfig/packet/config.go @@ -12,15 +12,20 @@ var defaultPacketConfigEnvVar = "PACKET_CONFIG" // TODO(displague) what is the preferred config for Packet projects? support // both yaml and json? -var defaultPacketConfigPath = filepath.Join(os.Getenv("HOME"), "packet-config.yaml") +var defaultPacketConfigPath = filepath.Join(os.Getenv("HOME"), ".packet-config.yaml") // Config holds Packet api access details type Config struct { + // APIURL is the Base URL for accessing the Packet API (https://api.packet.com/) + APIURL string `json:"api_url,omitempty"` + + // APIKey is the User or Project API Key used to authenticate requests to the Packet API + APIKey string `json:"api_key,omitempty"` } // LoadPacketConfig from the following location (first wins): // 1. PACKET_CONFIG env variable -// 2 $defaultPacketConfigPath +// 2. $defaultPacketConfigPath // See #@Config for the expected format func LoadPacketConfig() ([]byte, error) { data, err := ioutil.ReadFile(discoverPath()) diff --git a/pkg/asset/installconfig/packet/packet.go b/pkg/asset/installconfig/packet/packet.go index 95c175f6815..23ea236f51c 100644 --- a/pkg/asset/installconfig/packet/packet.go +++ b/pkg/asset/installconfig/packet/packet.go @@ -5,30 +5,123 @@ import ( survey "gopkg.in/AlecAivazis/survey.v1" "github.com/openshift/installer/pkg/types/packet" - packetdefaults "github.com/openshift/installer/pkg/types/packet/defaults" "github.com/openshift/installer/pkg/validate" ) +const ( + DefaultFacility = "EWR1" // Parsippany, NJ, US +) + // Platform collects packet-specific configuration. func Platform() (*packet.Platform, error) { - var uri string + facilityCode, err := selectFacility() + if err != nil { + return nil, err + } + + projectID, err := selectProject() + if err != nil { + return nil, err + } + + return &packet.Platform{ + FacilityCode: facilityCode, + ProjectID: projectID, + }, nil +} + +func selectProject() (string, error) { + var projectID string + /* + //TODO(displague) offer a mapping of project names to project ids, project + // names may be duplicated between organizations so names may not be unique. + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + conn, err := getConnection(Config{APIKey: apiKey, APIURL: apiURL}) + if err != nil { + return nil, errors.Wrap(err, "failed to create Packet connection") + } + + + client := &Client{Conn: conn} + projects, err := client.ListProjects(ctx) + + if err != nil { + return nil, errors.Wrap(err, "failed to list Packet projects") + } + + projectNames := []string{} + for _, p := range projects { + projectNames = append(projectNames, p.Name) + } + */ + + err := survey.Ask([]*survey.Question{{ + Prompt: &survey.Input{ + Message: "Packet Project ID", + Help: "The Packet project id to use for installation", + }, + }}, &projectID) + + if err != nil { + return "", err + } + + return projectID, nil +} + +func selectFacility() (string, error) { + var facilityID string err := survey.Ask([]*survey.Question{ - // TODO(displague) ask the right questions { Prompt: &survey.Input{ - Message: "Packet Connection URI", - Help: "The packet connection URI to be used. This must be accessible from the running cluster.", - Default: packetdefaults.DefaultURI, + Message: "Packet Facility Code", + Help: "The Packet Facility code (this is the short name, e.g. 
'ewr1')", + Default: DefaultFacility, + }, + }, + }, &facilityID) + if err != nil { + return "", err + } + return facilityID, nil +} + +func askForConfig() (*Config, error) { + var apiURL, apiKey string + + err := survey.Ask([]*survey.Question{ + { + Prompt: &survey.Input{ + Message: "Packet API URL", + Help: "The base URL for accessing the Packet API", + Default: "https://api.packet.com", }, Validate: survey.ComposeValidators(survey.Required, uriValidator), }, - }, &uri) + }, apiURL) + if err != nil { + return nil, err + } + + err = survey.Ask([]*survey.Question{ + { + Prompt: &survey.Password{ + Message: "Packet API Key", + Help: "The User or Project Packet API Key to access the Packet API", + }, + }, + }, &apiKey) if err != nil { return nil, err } - return &packet.Platform{}, nil - // TODO(displague) fill in the params + return &Config{ + APIKey: apiKey, + APIURL: apiURL, + }, nil } // uriValidator validates if the answer provided in prompt is a valid diff --git a/pkg/types/packet/platform.go b/pkg/types/packet/platform.go index 4a41e9d482a..4ee92d5c758 100644 --- a/pkg/types/packet/platform.go +++ b/pkg/types/packet/platform.go @@ -1,5 +1,9 @@ package packet type Platform struct { - // TODO(displague) properties of the platform, token, project, image, network, etc + // FacilityCode represents the Packet region and datacenter where your devices will be provisioned (https://www.packet.com/developers/docs/getting-started/facilities/) + FacilityCode string `json:"facility_code,omitempty"` + + // ProjectID represents the Packet project used for logical grouping and invoicing (https://www.packet.com/developers/docs/API/getting-started/) + ProjectID string `json:"project_id,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 9c9dd2b6fed..a264874a691 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -112,6 +112,9 @@ const ( // IBMCloudPlatformType represents IBM Cloud infrastructure. IBMCloudPlatformType PlatformType = "IBMCloud" + + // PacketPlatformType represents Packet infrastructure. + PacketPlatformType PlatformType = "Packet" ) // IBMCloudProviderType is a specific supported IBM Cloud provider cluster type @@ -172,6 +175,10 @@ type PlatformSpec struct { // IBMCloud contains settings specific to the IBMCloud infrastructure provider. // +optional IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` + + // Packet contains settings specific to the Packet infrastructure provider. + // +optional + Packet *PacketPlatformSpec `json:"packet,omitempty"` } // PlatformStatus holds the current status specific to the underlying infrastructure provider @@ -222,6 +229,10 @@ type PlatformStatus struct { // IBMCloud contains settings specific to the IBMCloud infrastructure provider. // +optional IBMCloud *IBMCloudPlatformStatus `json:"ibmcloud,omitempty"` + + // Packet contains settings specific to the Packet infrastructure provider. + // +optional + Packet *PacketPlatformStatus `json:"platform,omitempty"` } // AWSServiceEndpoint store the configuration of a custom url to @@ -438,6 +449,19 @@ type IBMCloudPlatformStatus struct { ProviderType IBMCloudProviderType `json:"providerType,omitempty"` } +// PacketPlatformSpec holds the desired state of the Packet Platform infrastructure provider. +// This only includes fields that can be modified in the cluster. 
+type PacketPlatformSpec struct{} + +// PacketPlatformStatus holds the current status of the Packet Platform infrastructure provider. +type PacketPlatformStatus struct { + // ProjectID for new Packet resources created for the cluster. + ProjectID string `json:"projectID"` + + // Facility for new Packet resources created for the cluster. + Facility string `json:"facility"` +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // InfrastructureList is From e6496262ab61c7f6e5a93417b7c9fee96f59f70c Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Tue, 22 Jan 2019 14:05:45 -0500 Subject: [PATCH 05/11] doc: Begin a document on adding a new OpenShift platform This covers the minimal steps and process to go from "nothing" to "OpenShift is fully capable of running on your platform". Heavily work in progress, but should capture the why, our support levels, and our target config, as well as mechanical steps to get down the line. --- docs/dev/adding-new-platform.md | 198 ++++++++++++++++++++++++++++++++ 1 file changed, 198 insertions(+) create mode 100644 docs/dev/adding-new-platform.md diff --git a/docs/dev/adding-new-platform.md b/docs/dev/adding-new-platform.md new file mode 100644 index 00000000000..724b98c3d2b --- /dev/null +++ b/docs/dev/adding-new-platform.md @@ -0,0 +1,198 @@ +How to add a new platform to OpenShift +====================================== + +This document describes the process for adding a new platform to OpenShift in stages. Because there are many components to an automated platform, the process is defined in terms of delivering specific levels of functionality over time. + +Terminology: + +* `User Provided Infrastructure (UPI)` - The documentation and enablement that describes how to launch OpenShift on a particular platform following the supported best practices. +* `Install Provided Infrastructure (IPI)` - Infrastructure created as part of the install process following the best practices on a platform. IPI may support options that allow portions of the infrastructure to be user-provided. +* `Cloud Provider` - The set of controllers in OpenShift that automatically manage storage, networking, and host failure detection by invoking infrastructure APIs. +* `Dynamic Compute` - The 4.x cloud provider feature that allows OpenShift to automatically manage creating, deleting, and scaling nodes by invoking infrastructure APIs. Exposed via the Machine API (`Machine`, `MachineSet`, and `MachineDeployment`) and implemented per platform via an `actuator` controller. + +The general order of adding a new platform for OpenShift is: + +### Enable core platform + +1. **Boot** - Ensure RH CoreOS boots on the desired platform, that Ignition works, and that you have VM / machine images to test with +2. **Arch** - Identify the correct opinionated configuration for a desired platform supporting the default features. +3. **CI** - Identify credentials and setup for a CI environment, ensure those credentials exist and can be used in the CI enviroment +4. **Name** - Identify and get approved the correct naming for adding a new platform to the core API objects (specifically the [infrastructure config](https://github.com/openshift/api/blob/master/config/v1/types_infrastructure.go) and the installer config (https://github.com/openshift/installer/blob/master/pkg/types/aws/doc.go)) so that we are consistent +5. 
**Enable Provisioning** Add a hidden installer option to this repo for the desired platform as a PR and implement the minimal features for bootstrap as well as a reliable teardown +6. **Enable Platform** Ensure all operators treat your platform as a no-op +7. **CI Job** Add a new CI job to the installer that uses the credentials above to run the installer against the platform and correctly tear down resources +8. **Publish Images** Ensure RH CoreOS images on the platform are being published to a location CI can test +9. **Merge** Merge the platform PR to the installer with a passing platform specific CI job + +At this point the platform is said to be an `unsupported IPI` (installer provided infrastructure) install - work can begin enabling in other repositories. Once these steps have been completed and official documentation is available, the platform can said to be `supported UPI without cloud provider` (user provided infrastructure) for the set of options in **Arch**. + +### Enable component infrastructure management + +Once the platform can be launched and tested, system features must be implemented. The sections below are roughly independent: + +* General requirements: + * Replace the installer terraform destroy with one that doesn't rely on terraform state + * Ensure the installer IPI support is consistent with other platform features (private config, etc) + * Not all platforms will support all features, so IPI is taken to be a spectrum of support + * Enable a CI job that verifies the e2e suite for the given platform runs successfully +* Requirements for dynamic storage and dynamic load balancing + * Ensure the cloud provider is enabled in Kubernetes for your platform (this is required for `supported UPI with cloud provider`) + * Enable cluster-storage-operator to set the correct default storage class +* Requirements for dynamic compute: + * Enable the cloud credential operator for the platform to subdivide access for individual operators + * Enable dynamic compute (MachineSets) by adding a cloud actuator for that platform +* Requirements for dynamic ingress and images: + * Enable cluster-image-registry-operator to provision a storage bucket (if your platform supports object storage) + * Enable cluster-ingress-operator to provision the wildcard domain names + +At this point the platform is said to be a `supported IPI with Dynamic Compute` if the platform supports +MachineSets, or `supported IPI without Dynamic Compute` if it does not. + + +OpenShift Architectural Design +------------------------------ + +OpenShift 4 combines Kubernetes, fully-managed host lifecycle, dynamic infrastructure management, and a comprehensive set of fully-automated platform extensions that can be upgraded and managed uniformly. The foundation of the platform is Red Hat CoreOS, an immutable operating system based on RHEL that is capable of acting as a fully-integrated part of the cluster. The Kubernetes control plane is hosted on the cluster, along with a number of other fundamental extensions like cluster networking, ingress, storage, and application development tooling. Each of those extensions is fully managed on-cluster via a cluster operator that reacts to top level global configuration APIs and can automatically reconfigure the affected components. The Operator Lifecycle Manager (OLM) allows additional ecosystem operators to be installed, upgraded and managed. 
All of these components - from the operating system kernel to the web console - are part of a unified update lifecycle under the top level Cluster Version Operator which offers worry free rolling updates of the entire infrastructure. + +### Core configuration + +An OpenShift cluster programs the infrastructure it runs on to provide operational simplicity. For every platform, the minimum requirements are: + +1. The control plane nodes: + 1. Run RH CoreOS, allowing in-place updates + 2. Are fronted by a load balancer that allows raw TCP connections to port 6443 and exposes port 443 + 3. Have low latency interconnections connections (<5ms RTT) and persistent disks that survive reboot and are provisoned for at least 300 IOPS + 4. Have cloud or infrastructure firewall rules that at minimum allow the standard ports to be opened (see AWS provider) + 5. Do *not* have automatic cloud provider permissions to perform infrastructure API calls + 6. Have a domain name pointing to the load balancer IP(s) that is `api.` + 7. Has an internal DNS CNAME pointing to each master called `etcd-N.` that + 8. Has an optional internal load balancer that TCP load balances all master nodes, with a DNS name `internal-api.` pointing to the load balancer. +2. The bootstrap node: + 1. Runs RH CoreOS + 2. Is reachable by control plane nodes over the network + 3. Is part of the control plane load balancer until it is removed + 4. Can reach a network endpoint that hosts the bootstrap ignition file securely, or has the bootstrap ignition injected +3. All other compute nodes: + 1. Must be able to reach the internal IPs reported by the master nodes directly + 2. Have cloud or infrastructure firewall rules that at minimum allow ports 4789, 6443, 9000-10000, and 10250-10255 to be reachable + +The following clarifications to configurations are noted: + +1. The control plane load balancer does not need to be exposed to the public internet, but the DNS entry must be visible from the location the installer is run. +2. Master nodes are not required to expose external IPs for SSH access, but can instead allow SSH from a bastion inside a protected network. +3. Compute nodes do not require external IPs + +For dynamic infrastructure, the following permissions are required to be provided as part of the install: + +1. Service LoadBalancer - Load balancers can be created and removed, infastructure nodes can be queried +2. Dynamic Storage - New volumes can be created, deleted, attached, and detached from nodes. Snapshot creation is optional if the platform supports snapshotting +3. Dynamic Compute - New instances can be created, deleted, and restarted inside of the cluster's network / infrastructure, plus any platform specific constructs like programming instance groups for master load balancing on GCP. + + +Booting RH CoreOS +----------------- + +Red Hat CoreOS uses ignition to receive initial configuration from a remote source. Ignition has platform specific behavior to read that configuration that is determined by the `oemID` embedded in the VM image. + +To boot RHCoS to a new platform, you must: + +1. Ensure [ignition](https://github.com/coreos/ignition) supports that platform +2. Ensure that RHCoS has any necessary platform specific code to communicate with the platform (for instance, on Azure the instance must periodically health check) - see [cloud support tracker on Fedora CoreOS for more info](https://github.com/coreos/fedora-coreos-tracker/issues/95). +3. Have a RHCoS image with the appropriate oemID tag set. 
+ +There is a script that assists you in converting the generic VM image to have a specific oemID set in the [coreos-assembler repo as gf-oemid](https://github.com/coreos/coreos-assembler/blob/master/src/gf-oemid). See the instructions there to create an image with the appropriate ID. + +Once you have uploaded the image to your platform, and the machine stays up, you can begin porting the installer to have a minimal IPI. + + +Continuous Integration +---------------------- + +To enable a new platform, require a core continuous integration testing loop that verifies that new changes do not regress our support for the platform. The minimum steps required are: + +1. Have an infrastructure that can receive API calls from the OpenShift CI system to provision/destroy instances +2. Support at minimum 3 concurrent clusters on that infrastructure as "per release image" testing (https://origin-release.svc.ci.openshift.org) that verify a release supports that platform +3. Also support a per-PR target that can be selectively run on the installer, core, and operator repositories in OpenShift in order to allow developers to test incremental changes to those components + +No PR will be merged to openshift/installer for platform support that cannot satisfy the above steps. + + +Naming +------ + +The platform name will be part of our public API and must go through standard API review. The name +should be consistent with common usage of the platform and be recognizable to a consumer. + +The following names for platforms are good examples of what is expected: + +* Amazon Web Services -> `aws` or `AWS` +* Google Cloud Platform -> `gcp` or `GCP` +* Azure -> `azure` or `Azure` +* Libvirt -> `libvirt` or `Libvirt` +* OpenStack -> `openstack` or `OpenStack` + + +Enable Provisioning +------------------- + +Since CI testing requires the ability to provision via an API, we define the basic path for supporting a platform as having a minimal provisioning path in the OpenShift installer. Not all platforms we support will have full infrastructure provisioning supported, but the basic path must be invokable via Go code in openshift-install before a platform can be certified. This ensures we have at least one path to installation. + +The OpenShift installer has normal and hidden provisioners. The hidden provisioners are explicitly unsupported for production use but are supported for testing. + +1. Add a new hidden provisioner +2. Define the minimal platform parameters that the provisioner must support +3. Use Terraform or direct Go code to provision that platform via the credentials provided to the installer. + +A minimal provisioner must be able to launch the control plane and bootstrap node via an API call and accept any "environmental" settings like network or region as inputs. The installer should use the Route53 DNS provisioning code to set up round robin to the bootstrap and control plane nodes if necessary. + + +Enable Platform +--------------- + +OpenShift handles platform functionality as a set of operators running on the platform that interface with users, admins, and infrastructure. Because operators handle day 2 reconfiguration of the cluster, many "installation" related duties are delegated to the operators. + +Operators derive their configuration from top level API objects called `global configuration`. One such object is the `Infrastructure` global config, which reports which platform the cluster is running on. 
+ +All operators that react to infrastructure must support a `None` option, and any unrecognized infrastructure platform **MUST** be treated as `None`. When an operator starts, it should log a single warning if the infrastructure provider is not recognized and then fall back to `None`. + +When adding a new platform to the installer, the infrastructure setting should happen automatically during bootstrapping, and if a component does not correctly treat your new platform as `None` it should be fixed immediately. + + +CI Job +------ + +The initial CI job for a new platform PR to `openshift/installer` must use the `cluster-installer-e2e` template but with an alternate profile, and the CI infrastructure should be configured with the credentials for your infrastructure in a `cluster-secrets-PLATFORM` secret. Talk to the testplatform team. +This CI job will then be reused whenever a repo wants to test, or when we add new release tests. + +A new platform should pass many of the kubernetes conformance tests, so the default job would run the e2e suite `kubernetes/conformance`. We may define a more scoped job if the platform cannot pass. + +The teardown behavior of the cluster is the hardest part of this process - because we run so many tests a day, it must be 100% reliable from the beginning. You should implement a reliable teardown mechanism in your `destroy` method, leveraging the OpenStack and AWS examples. + +We **will not** merge a new job if it does not have reliable cleanup in the face of failures, rate limits, etc, because it blocks other work. + + +Publishing Red Hat CoreOS Images +-------------------------------- + +RHCoS nodes can be upgraded to newer versions of kernel, userspace, and Kubelet post-creation. For this reason, the installer launches a recent version of RHCoS that is then upgraded at boot time to the version of RHCoS content that is included in the OpenShift release payload. + +Once a version of RHCoS supports the desired platform, an image with that OEM ID embedded must be published to either to the cloud or a publicly available download location on a regular schedule. The installer may then embed logic to identify the most recent location for the payload and automatically provide that to the installer provisioning steps. + + +Merge the initial platform support +---------------------------------- + +After all of the steps above have been completed, the pull request enabling the platform may be merged with documentation updated to indicate the platform is in an unsupported pre-release configuration. Other components may now begin their integration work. + + +Integration to individual operators +----------------------------------- + +1. Machine API Operator +2. Machine Config Operator +3. Cluster Storage Operator +4. Cloud Credential Operator +5. Cluster Ingress Operator +6. 
Cluster Image Registry Operator + +TODO: add details From c039c0815533f8d9bb7c2395778be41d0b2b26fd Mon Sep 17 00:00:00 2001 From: Marques Johansson Date: Wed, 15 Jul 2020 10:06:02 -0400 Subject: [PATCH 06/11] fill in packet Platform and Metadata types --- docs/dev/adding-new-platform.md | 314 ++-- go.mod | 6 +- go.sum | 29 +- pkg/asset/cluster/metadata.go | 4 + pkg/asset/cluster/packet/packet.go | 7 +- pkg/asset/ignition/machine/node.go | 6 + pkg/asset/installconfig/installconfig.go | 1 + pkg/types/packet/metadata.go | 5 + pkg/types/packet/platform.go | 43 + .../hashicorp/go-retryablehttp/README.md | 13 + .../hashicorp/go-retryablehttp/client.go | 206 +-- .../go-retryablehttp/roundtripper.go | 43 + .../hashicorp/terraform/flatmap/expand.go | 152 -- .../hashicorp/terraform/flatmap/flatten.go | 71 - .../hashicorp/terraform/flatmap/map.go | 82 - .../terraform/helper/config/decode.go | 28 - .../terraform/helper/config/validator.go | 214 --- .../terraform/helper/resource/error.go | 79 - .../helper/resource/grpc_test_provider.go | 43 - .../hashicorp/terraform/helper/resource/id.go | 45 - .../terraform/helper/resource/map.go | 140 -- .../terraform/helper/resource/resource.go | 49 - .../terraform/helper/resource/state.go | 259 ---- .../terraform/helper/resource/state_shim.go | 188 --- .../terraform/helper/resource/testing.go | 1320 ----------------- .../helper/resource/testing_config.go | 404 ----- .../helper/resource/testing_import_state.go | 232 --- .../terraform/helper/resource/wait.go | 84 -- .../api/config/v1/types_infrastructure.go | 24 - .../github.com/packethost/packngo/.drone.yml | 21 +- vendor/github.com/packethost/packngo/Makefile | 36 + .../github.com/packethost/packngo/README.md | 21 +- .../github.com/packethost/packngo/apikeys.go | 133 ++ .../packethost/packngo/bgp_configs.go | 2 +- .../packethost/packngo/bgp_sessions.go | 25 + .../github.com/packethost/packngo/connect.go | 148 -- .../github.com/packethost/packngo/devices.go | 179 ++- vendor/github.com/packethost/packngo/go.mod | 8 +- vendor/github.com/packethost/packngo/go.sum | 30 +- vendor/github.com/packethost/packngo/ip.go | 81 +- .../github.com/packethost/packngo/packngo.go | 169 ++- vendor/github.com/packethost/packngo/ports.go | 293 ++-- .../github.com/packethost/packngo/sshkeys.go | 2 +- .../github.com/packethost/packngo/volumes.go | 8 +- .../packet/config.go | 50 +- .../packet/datasource_packet_device.go | 286 ++++ .../datasource_packet_device_bgp_neighbors.go | 132 ++ .../datasource_packet_ip_block_ranges.go | 104 ++ .../datasource_packet_operating_system.go | 59 +- .../packet/datasource_packet_organization.go | 119 ++ .../datasource_packet_precreated_ip_block.go | 2 +- .../packet/datasource_packet_project.go | 168 +++ .../datasource_packet_spot_market_price.go | 2 +- .../datasource_packet_spot_market_request.go | 49 + .../packet/datasource_packet_volume.go | 188 +++ .../packet/errors.go | 19 +- .../packet/helpers_device.go | 247 +++ .../packet/provider.go | 23 +- .../packet/resource_packet_bgp_session.go | 6 +- .../packet/resource_packet_connect.go | 157 -- .../packet/resource_packet_device.go | 322 ++-- .../packet/resource_packet_ip_attachment.go | 2 +- .../packet/resource_packet_organization.go | 5 +- .../resource_packet_port_vlan_attachment.go | 123 +- .../packet/resource_packet_project.go | 16 +- .../packet/resource_packet_project_ssh_key.go | 40 +- .../resource_packet_reserved_ip_block.go | 16 +- .../resource_packet_spot_market_request.go | 19 +- .../packet/resource_packet_ssh_key.go | 21 +- 
.../packet/resource_packet_vlan.go | 3 +- .../packet/resource_packet_volume.go | 52 +- .../resource_packet_volume_attachment.go | 2 +- .../x/crypto/chacha20/chacha_generic.go | 61 +- vendor/golang.org/x/crypto/chacha20/xor.go | 17 +- .../golang.org/x/crypto/ssh/agent/client.go | 28 +- vendor/golang.org/x/crypto/ssh/cipher.go | 2 +- vendor/golang.org/x/crypto/ssh/kex.go | 4 +- vendor/golang.org/x/crypto/ssh/keys.go | 18 +- .../x/crypto/ssh/terminal/terminal.go | 8 + vendor/modules.txt | 11 +- 80 files changed, 3024 insertions(+), 4604 deletions(-) create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go delete mode 100644 vendor/github.com/hashicorp/terraform/flatmap/expand.go delete mode 100644 vendor/github.com/hashicorp/terraform/flatmap/flatten.go delete mode 100644 vendor/github.com/hashicorp/terraform/flatmap/map.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/config/decode.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/config/validator.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/error.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/id.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/map.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/resource.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/state.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/testing.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/wait.go create mode 100644 vendor/github.com/packethost/packngo/Makefile create mode 100644 vendor/github.com/packethost/packngo/apikeys.go delete mode 100644 vendor/github.com/packethost/packngo/connect.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_device.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_device_bgp_neighbors.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_ip_block_ranges.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_organization.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_project.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_spot_market_request.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_volume.go create mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/helpers_device.go delete mode 100644 vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_connect.go diff --git a/docs/dev/adding-new-platform.md b/docs/dev/adding-new-platform.md index 724b98c3d2b..6f433d39d1a 100644 --- a/docs/dev/adding-new-platform.md +++ b/docs/dev/adding-new-platform.md @@ -1,127 +1,209 @@ -How to add a new platform to OpenShift -====================================== +# 
How to add a new platform to OpenShift This document describes the process for adding a new platform to OpenShift in stages. Because there are many components to an automated platform, the process is defined in terms of delivering specific levels of functionality over time. -Terminology: +## Terminology * `User Provided Infrastructure (UPI)` - The documentation and enablement that describes how to launch OpenShift on a particular platform following the supported best practices. * `Install Provided Infrastructure (IPI)` - Infrastructure created as part of the install process following the best practices on a platform. IPI may support options that allow portions of the infrastructure to be user-provided. * `Cloud Provider` - The set of controllers in OpenShift that automatically manage storage, networking, and host failure detection by invoking infrastructure APIs. * `Dynamic Compute` - The 4.x cloud provider feature that allows OpenShift to automatically manage creating, deleting, and scaling nodes by invoking infrastructure APIs. Exposed via the Machine API (`Machine`, `MachineSet`, and `MachineDeployment`) and implemented per platform via an `actuator` controller. -The general order of adding a new platform for OpenShift is: +## New Platform Milestones ### Enable core platform -1. **Boot** - Ensure RH CoreOS boots on the desired platform, that Ignition works, and that you have VM / machine images to test with -2. **Arch** - Identify the correct opinionated configuration for a desired platform supporting the default features. -3. **CI** - Identify credentials and setup for a CI environment, ensure those credentials exist and can be used in the CI enviroment -4. **Name** - Identify and get approved the correct naming for adding a new platform to the core API objects (specifically the [infrastructure config](https://github.com/openshift/api/blob/master/config/v1/types_infrastructure.go) and the installer config (https://github.com/openshift/installer/blob/master/pkg/types/aws/doc.go)) so that we are consistent -5. **Enable Provisioning** Add a hidden installer option to this repo for the desired platform as a PR and implement the minimal features for bootstrap as well as a reliable teardown +1. **Boot** - Ensure RH CoreOS boots on the desired platform, that Ignition + works, and that you have VM / machine images to test with. Platforms that are + not supported by Ignition can provide support for their platform at + + ([example](https://github.com/coreos/ignition/pull/667)). +2. **Arch** - Identify the correct opinionated configuration for a desired + platform supporting the default features. +3. **CI** - Identify credentials and setup for a CI environment, ensure those + credentials exist and can be used in the CI environment +4. **Name** - For consistency, identify and get approved the correct naming for + adding a new platform to the core API objects (specifically the + [infrastructure + config](https://github.com/openshift/api/blob/master/config/v1/types_infrastructure.go) + and the installer config + (https://github.com/openshift/installer/blob/master/pkg/types/aws/doc.go)) + +5. **Enable Provisioning** Add a hidden installer option to this repo for the + desired platform as a PR and implement the minimal features for bootstrap as + well as a reliable teardown 6. **Enable Platform** Ensure all operators treat your platform as a no-op -7. 
**CI Job** Add a new CI job to the installer that uses the credentials above to run the installer against the platform and correctly tear down resources -8. **Publish Images** Ensure RH CoreOS images on the platform are being published to a location CI can test -9. **Merge** Merge the platform PR to the installer with a passing platform specific CI job +7. **CI Job** Add a new CI job to the installer that uses the credentials above + to run the installer against the platform and correctly tear down resources +8. **Publish Images** Ensure RH CoreOS images on the platform are being + published to a location CI can test +9. **Merge** Merge the platform PR to the installer with a passing platform + specific CI job + +Expected PRs: + +* : adding your Platform + TODO(displague) expected limitations At this point the platform is said to be an `unsupported IPI` (installer provided infrastructure) install - work can begin enabling in other repositories. Once these steps have been completed and official documentation is available, the platform can said to be `supported UPI without cloud provider` (user provided infrastructure) for the set of options in **Arch**. +Expected PRs: + +* : adding your Platform to types_infrastructure.go + ### Enable component infrastructure management Once the platform can be launched and tested, system features must be implemented. The sections below are roughly independent: +TODO(displague) Outline repo/files related to each phase, add context (why is this needed) + * General requirements: - * Replace the installer terraform destroy with one that doesn't rely on terraform state - * Ensure the installer IPI support is consistent with other platform features (private config, etc) - * Not all platforms will support all features, so IPI is taken to be a spectrum of support - * Enable a CI job that verifies the e2e suite for the given platform runs successfully + * Replace the installer `terraform destroy` with one that doesn't rely on + Terraform state. When the cluster components start creating additional + resources on the target platform, Terraform won't have knowledge of them. 
+ * Ensure the installer IPI support is consistent with other platform features + (private config, etc) + * Not all platforms will support all features, so IPI is taken to be a + spectrum of support + * Enable a CI job that verifies the E2E suite for the given platform runs + successfully * Requirements for dynamic storage and dynamic load balancing - * Ensure the cloud provider is enabled in Kubernetes for your platform (this is required for `supported UPI with cloud provider`) - * Enable cluster-storage-operator to set the correct default storage class + * Ensure the cloud provider is enabled in Kubernetes for your platform (this + is required for `supported UPI with cloud provider`) + * Enable cluster-storage-operator to set the correct default storage class * Requirements for dynamic compute: - * Enable the cloud credential operator for the platform to subdivide access for individual operators - * Enable dynamic compute (MachineSets) by adding a cloud actuator for that platform + * Enable the cloud credential operator for the platform to subdivide access + for individual operators + * Enable dynamic compute (`MachineSets`) by adding a cloud actuator for that + platform * Requirements for dynamic ingress and images: - * Enable cluster-image-registry-operator to provision a storage bucket (if your platform supports object storage) - * Enable cluster-ingress-operator to provision the wildcard domain names + * Enable + [cluster-image-registry-operator](https://github.com/openshift/cluster-image-registry-operator#image-registry-operator) + to provision a storage bucket (if your platform supports object storage) + * Enable [cluster-ingress-operator](https://github.com/openshift/cluster-ingress-operator#openshift-ingress-operator) to provision the wildcard domain names -At this point the platform is said to be a `supported IPI with Dynamic Compute` if the platform supports -MachineSets, or `supported IPI without Dynamic Compute` if it does not. +At this point the platform is said to be a `supported IPI with Dynamic Compute` +if the platform supports `MachineSets`, or `supported IPI without Dynamic +Compute` if it does not. +## OpenShift Architectural Design -OpenShift Architectural Design ------------------------------- +OpenShift 4 combines Kubernetes, fully-managed host lifecycle, dynamic +infrastructure management, and a comprehensive set of fully-automated platform +extensions that can be upgraded and managed uniformly. -OpenShift 4 combines Kubernetes, fully-managed host lifecycle, dynamic infrastructure management, and a comprehensive set of fully-automated platform extensions that can be upgraded and managed uniformly. The foundation of the platform is Red Hat CoreOS, an immutable operating system based on RHEL that is capable of acting as a fully-integrated part of the cluster. The Kubernetes control plane is hosted on the cluster, along with a number of other fundamental extensions like cluster networking, ingress, storage, and application development tooling. Each of those extensions is fully managed on-cluster via a cluster operator that reacts to top level global configuration APIs and can automatically reconfigure the affected components. The Operator Lifecycle Manager (OLM) allows additional ecosystem operators to be installed, upgraded and managed. All of these components - from the operating system kernel to the web console - are part of a unified update lifecycle under the top level Cluster Version Operator which offers worry free rolling updates of the entire infrastructure. 
+The foundation of the platform is Red Hat CoreOS, an immutable operating system +based on RHEL that is capable of acting as a fully-integrated part of the +cluster. + +The Kubernetes control plane is hosted on the cluster, along with a number of +other fundamental extensions like cluster networking, ingress, storage, and +application development tooling. Each of those extensions is fully managed +on-cluster via a cluster operator that reacts to top level global configuration +APIs and can automatically reconfigure the affected components. + +The Operator Lifecycle Manager (OLM) allows additional ecosystem operators to be +installed, upgraded and managed. All of these components - from the operating +system kernel to the web console - are part of a unified update lifecycle under +the top level Cluster Version Operator which offers worry free rolling updates +of the entire infrastructure. ### Core configuration -An OpenShift cluster programs the infrastructure it runs on to provide operational simplicity. For every platform, the minimum requirements are: +An OpenShift cluster programs the infrastructure it runs on to provide operational simplicity. -1. The control plane nodes: +For every platform, the minimum requirements are: + +1. Control Plane Nodes: 1. Run RH CoreOS, allowing in-place updates - 2. Are fronted by a load balancer that allows raw TCP connections to port 6443 and exposes port 443 - 3. Have low latency interconnections connections (<5ms RTT) and persistent disks that survive reboot and are provisoned for at least 300 IOPS - 4. Have cloud or infrastructure firewall rules that at minimum allow the standard ports to be opened (see AWS provider) - 5. Do *not* have automatic cloud provider permissions to perform infrastructure API calls - 6. Have a domain name pointing to the load balancer IP(s) that is `api.` - 7. Has an internal DNS CNAME pointing to each master called `etcd-N.` that - 8. Has an optional internal load balancer that TCP load balances all master nodes, with a DNS name `internal-api.` pointing to the load balancer. -2. The bootstrap node: + 2. Are fronted by a load balancer that allows raw TCP connections to port + 6443 and exposes port 443 + 3. Meet hardware requirements: + * Have low latency interconnections connections (<5ms RTT) + * persistent disks that survive reboot + * provisioned for >= 300 IOPS + 4. Have cloud or infrastructure firewall rules that at minimum allow the + standard ports to be opened (see AWS provider) + 5. Do *not* have automatic cloud provider permissions to perform + infrastructure API calls + 6. Have a domain name pointing to the load balancer IP(s) that is + `api.` + 7. Has an internal DNS CNAME pointing to each control plane called + `-etcd-N.` + 8. Have an optional internal load balancer that TCP load balances all master + nodes, with a DNS name `internal-api.` pointing to the load + balancer. +2. One Bootstrap Node: 1. Runs RH CoreOS 2. Is reachable by control plane nodes over the network 3. Is part of the control plane load balancer until it is removed - 4. Can reach a network endpoint that hosts the bootstrap ignition file securely, or has the bootstrap ignition injected -3. All other compute nodes: - 1. Must be able to reach the internal IPs reported by the master nodes directly - 2. Have cloud or infrastructure firewall rules that at minimum allow ports 4789, 6443, 9000-10000, and 10250-10255 to be reachable + 4. 
Can reach a network endpoint that hosts the bootstrap Ignition file + securely, or has the bootstrap Ignition injected +3. Compute Nodes: + 1. Must be able to reach the internal IPs reported by the control plane nodes + directly + 2. Have cloud or infrastructure firewall rules that at minimum allow ports + 4789, 6443, 9000-10000, and 10250-10255 to be reachable The following clarifications to configurations are noted: -1. The control plane load balancer does not need to be exposed to the public internet, but the DNS entry must be visible from the location the installer is run. -2. Master nodes are not required to expose external IPs for SSH access, but can instead allow SSH from a bastion inside a protected network. -3. Compute nodes do not require external IPs +1. The control plane load balancer does not need to be exposed to the public + internet, but the DNS entry must be visible from the location the installer + is run. +2. Control plane and compute nodes are not required to expose external IPs for + SSH access, and can instead allow SSH from a bastion inside a protected + network. For dynamic infrastructure, the following permissions are required to be provided as part of the install: -1. Service LoadBalancer - Load balancers can be created and removed, infastructure nodes can be queried -2. Dynamic Storage - New volumes can be created, deleted, attached, and detached from nodes. Snapshot creation is optional if the platform supports snapshotting -3. Dynamic Compute - New instances can be created, deleted, and restarted inside of the cluster's network / infrastructure, plus any platform specific constructs like programming instance groups for master load balancing on GCP. - +1. Service LoadBalancer - Load balancers can be created and removed, + infrastructure nodes can be queried +2. Dynamic Storage - New volumes can be created, deleted, attached, and detached + from nodes. Snapshot creation is optional if the platform supports + them. +3. Dynamic Compute - New instances can be created, deleted, and restarted inside + of the cluster's network / infrastructure, plus any platform specific + constructs like programming instance groups for control plane load balancing on GCP. -Booting RH CoreOS ------------------ +## Booting RH CoreOS -Red Hat CoreOS uses ignition to receive initial configuration from a remote source. Ignition has platform specific behavior to read that configuration that is determined by the `oemID` embedded in the VM image. +Red Hat CoreOS uses Ignition to receive initial configuration from a remote source. Ignition has platform specific behavior to read that configuration that is determined by the `platformID` embedded in the VM image. -To boot RHCoS to a new platform, you must: +To boot RHCOS to a new platform, you must: -1. Ensure [ignition](https://github.com/coreos/ignition) supports that platform -2. Ensure that RHCoS has any necessary platform specific code to communicate with the platform (for instance, on Azure the instance must periodically health check) - see [cloud support tracker on Fedora CoreOS for more info](https://github.com/coreos/fedora-coreos-tracker/issues/95). -3. Have a RHCoS image with the appropriate oemID tag set. +1. Ensure [Ignition](https://github.com/coreos/ignition) [supports that + platform](https://github.com/coreos/ignition/blob/master/doc/supported-platforms.md). +2. 
Ensure that RHCOS has any necessary platform specific code to communicate + with the platform (for instance, on Azure the instance must periodically + health check) - see [cloud support tracker on Fedora CoreOS for more + info](https://github.com/coreos/fedora-coreos-tracker/issues/95). +3. Have a RHCOS image with the appropriate platformID tag set. -There is a script that assists you in converting the generic VM image to have a specific oemID set in the [coreos-assembler repo as gf-oemid](https://github.com/coreos/coreos-assembler/blob/master/src/gf-oemid). See the instructions there to create an image with the appropriate ID. +There is a script that assists you in converting the generic VM image to have a specific platformID set in the [coreos-assembler repo as gf-platformid](https://github.com/coreos/coreos-assembler/blob/master/src/gf-platformid). See the instructions there to create an image with the appropriate ID. Once you have uploaded the image to your platform, and the machine stays up, you can begin porting the installer to have a minimal IPI. +## Continuous Integration -Continuous Integration ----------------------- +All platforms require a core continuous integration testing loop that verifies that new changes do not regress our support for the platform. The minimum steps required are: -To enable a new platform, require a core continuous integration testing loop that verifies that new changes do not regress our support for the platform. The minimum steps required are: +1. Have an infrastructure that can receive API calls from the OpenShift CI + system to provision/destroy instances +2. Support at minimum 3 concurrent clusters on that infrastructure as "per + release image" testing (https://origin-release.svc.ci.openshift.org) that + verify a release supports that platform +3. Also support a per-PR target that can be selectively run on the installer, + core, and operator repositories in OpenShift in order to allow developers to + test incremental changes to those components -1. Have an infrastructure that can receive API calls from the OpenShift CI system to provision/destroy instances -2. Support at minimum 3 concurrent clusters on that infrastructure as "per release image" testing (https://origin-release.svc.ci.openshift.org) that verify a release supports that platform -3. Also support a per-PR target that can be selectively run on the installer, core, and operator repositories in OpenShift in order to allow developers to test incremental changes to those components +No PR will be merged to openshift/installer for platform support that cannot +satisfy the above steps. -No PR will be merged to openshift/installer for platform support that cannot satisfy the above steps. +## Naming - -Naming ------- - -The platform name will be part of our public API and must go through standard API review. The name -should be consistent with common usage of the platform and be recognizable to a consumer. +The platform name will be part of our public API and must go through standard +API review. The name should be consistent with common usage of the platform and +be recognizable to a consumer. 
The following names for platforms are good examples of what is expected: @@ -131,62 +213,90 @@ The following names for platforms are good examples of what is expected: * Libvirt -> `libvirt` or `Libvirt` * OpenStack -> `openstack` or `OpenStack` +## Enable Provisioning -Enable Provisioning -------------------- +Since CI testing requires the ability to provision via an API, we define the +basic path for supporting a platform as having a minimal provisioning path in +the OpenShift installer. Not all platforms we support will have full +infrastructure provisioning supported, but the basic path must be invocable via +Go code in `openshift-install` before a platform can be certified. This ensures +we have at least one path to installation. -Since CI testing requires the ability to provision via an API, we define the basic path for supporting a platform as having a minimal provisioning path in the OpenShift installer. Not all platforms we support will have full infrastructure provisioning supported, but the basic path must be invokable via Go code in openshift-install before a platform can be certified. This ensures we have at least one path to installation. - -The OpenShift installer has normal and hidden provisioners. The hidden provisioners are explicitly unsupported for production use but are supported for testing. +The OpenShift installer has normal and hidden provisioners. The hidden +provisioners are explicitly unsupported for production use but are supported for +testing. 1. Add a new hidden provisioner 2. Define the minimal platform parameters that the provisioner must support -3. Use Terraform or direct Go code to provision that platform via the credentials provided to the installer. - -A minimal provisioner must be able to launch the control plane and bootstrap node via an API call and accept any "environmental" settings like network or region as inputs. The installer should use the Route53 DNS provisioning code to set up round robin to the bootstrap and control plane nodes if necessary. +3. Use Terraform or direct Go code to provision that platform via the + credentials provided to the installer. +A minimal provisioner must be able to launch the control plane and bootstrap +node via an API call and accept any "environmental" settings like network or +region as inputs. The installer should use the Route53 DNS provisioning code to +set up round robin to the bootstrap and control plane nodes if the platform does not offer similar managed DNS services. -Enable Platform ---------------- +## Enable Platform -OpenShift handles platform functionality as a set of operators running on the platform that interface with users, admins, and infrastructure. Because operators handle day 2 reconfiguration of the cluster, many "installation" related duties are delegated to the operators. +OpenShift handles platform functionality as a set of operators running on the +platform that interface with users, admins, and infrastructure. Because +operators handle day 2 reconfiguration of the cluster, many "installation" +related duties are delegated to the operators. -Operators derive their configuration from top level API objects called `global configuration`. One such object is the `Infrastructure` global config, which reports which platform the cluster is running on. +Operators derive their configuration from top level API objects called `global +configuration`. One such object is the `Infrastructure` global config, which +reports which platform the cluster is running on. 
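(Not part of the patch: a minimal sketch of how an operator might read the platform from this global config, using the `PacketPlatformType` and `PacketPlatformStatus` types added to `types_infrastructure.go` earlier in this series. The `Type` field of `PlatformStatus`, the helper name, and the project/facility values are assumptions for illustration; unrecognized platforms fall back to `None` as described in the next paragraph.)

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// platformName returns the platform an operator should act on; anything it
// does not recognize is handled exactly like "None".
func platformName(status *configv1.PlatformStatus) string {
	if status == nil {
		return string(configv1.NonePlatformType)
	}
	switch status.Type {
	case configv1.PacketPlatformType:
		return string(configv1.PacketPlatformType)
	default:
		return string(configv1.NonePlatformType)
	}
}

func main() {
	// Hypothetical status an installer-bootstrapped cluster might report.
	status := &configv1.PlatformStatus{
		Type: configv1.PacketPlatformType,
		Packet: &configv1.PacketPlatformStatus{
			ProjectID: "00000000-0000-0000-0000-000000000000",
			Facility:  "ewr1",
		},
	}
	fmt.Println(platformName(status)) // "Packet"
}
```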
-All operators that react to infrastructure must support a `None` option, and any unrecognized infrastructure platform **MUST** be treated as `None`. When an operator starts, it should log a single warning if the infrastructure provider is not recognized and then fall back to `None`. +All operators that react to infrastructure must support a `None` option, and any +unrecognized infrastructure platform **MUST** be treated as `None`. When an +operator starts, it should log a single warning if the infrastructure provider +is not recognized and then fall back to `None`. -When adding a new platform to the installer, the infrastructure setting should happen automatically during bootstrapping, and if a component does not correctly treat your new platform as `None` it should be fixed immediately. +When adding a new platform to the installer, the infrastructure setting should +happen automatically during bootstrapping, and if a component does not correctly +treat your new platform as `None` it should be fixed immediately. +## CI Job -CI Job ------- +The initial CI job for a new platform PR to `openshift/installer` must use the +`cluster-installer-e2e` template but with an alternate profile, and the CI +infrastructure should be configured with the credentials for your infrastructure +in a `cluster-secrets-PLATFORM` secret. Talk to the testplatform team. This CI +job will then be reused whenever a repo wants to test, or when we add new +release tests. -The initial CI job for a new platform PR to `openshift/installer` must use the `cluster-installer-e2e` template but with an alternate profile, and the CI infrastructure should be configured with the credentials for your infrastructure in a `cluster-secrets-PLATFORM` secret. Talk to the testplatform team. -This CI job will then be reused whenever a repo wants to test, or when we add new release tests. +A new platform should pass many of the kubernetes conformance tests, so the +default job would run the e2e suite `kubernetes/conformance`. We may define a +more scoped job if the platform cannot pass. -A new platform should pass many of the kubernetes conformance tests, so the default job would run the e2e suite `kubernetes/conformance`. We may define a more scoped job if the platform cannot pass. - -The teardown behavior of the cluster is the hardest part of this process - because we run so many tests a day, it must be 100% reliable from the beginning. You should implement a reliable teardown mechanism in your `destroy` method, leveraging the OpenStack and AWS examples. +The teardown behavior of the cluster is the hardest part of this process - +because we run so many tests a day, it must be 100% reliable from the beginning. +You should implement a reliable teardown mechanism in your `destroy` method, +leveraging the OpenStack and AWS examples. We **will not** merge a new job if it does not have reliable cleanup in the face of failures, rate limits, etc, because it blocks other work. +## Publishing Red Hat CoreOS Images -Publishing Red Hat CoreOS Images --------------------------------- - -RHCoS nodes can be upgraded to newer versions of kernel, userspace, and Kubelet post-creation. For this reason, the installer launches a recent version of RHCoS that is then upgraded at boot time to the version of RHCoS content that is included in the OpenShift release payload. 
- -Once a version of RHCoS supports the desired platform, an image with that OEM ID embedded must be published to either to the cloud or a publicly available download location on a regular schedule. The installer may then embed logic to identify the most recent location for the payload and automatically provide that to the installer provisioning steps. - +RHCOS nodes can be upgraded to newer versions of kernel, userspace, and Kubelet +post-creation. For this reason, the installer launches a recent version of RHCOS +that is then upgraded at boot time to the version of RHCOS content that is +included in the OpenShift release payload. -Merge the initial platform support ----------------------------------- +Once a version of RHCOS supports the desired platform, an image with that +Platform ID embedded must be published to the cloud or a publicly available +download location on a regular schedule. The installer may then embed logic to +identify the most recent location for the payload and automatically provide that +to the installer provisioning steps. -After all of the steps above have been completed, the pull request enabling the platform may be merged with documentation updated to indicate the platform is in an unsupported pre-release configuration. Other components may now begin their integration work. +## Merge the initial platform support +After all of the steps above have been completed, the pull request enabling the +platform may be merged with documentation updated to indicate the platform is in +an unsupported pre-release configuration. Other components may now begin their +integration work. -Integration to individual operators ------------------------------------ +## Integration to individual operators 1. Machine API Operator 2. Machine Config Operator diff --git a/go.mod b/go.mod index 59e4ceaed3c..60b8acfe6ab 100644 --- a/go.mod +++ b/go.mod @@ -62,7 +62,7 @@ require ( github.com/ovirt/go-ovirt v0.0.0-20200613023950-320a86f9df27 github.com/ovirt/terraform-provider-ovirt v0.4.3-0.20200406133650-74a154c1d861 github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db // indirect - github.com/packethost/packngo v0.2.0 + github.com/packethost/packngo v0.2.1-0.20200424110205-36917dbc292f github.com/pborman/uuid v1.2.0 github.com/pierrec/lz4 v2.3.0+incompatible // indirect github.com/pkg/errors v0.9.1 @@ -80,14 +80,14 @@ require ( github.com/terraform-providers/terraform-provider-ignition v1.2.1 github.com/terraform-providers/terraform-provider-local v1.4.0 github.com/terraform-providers/terraform-provider-openstack v1.28.0 - github.com/terraform-providers/terraform-provider-packet v1.7.2 + github.com/terraform-providers/terraform-provider-packet v1.7.3-0.20200512085448-9717adf77547 github.com/terraform-providers/terraform-provider-random v1.3.2-0.20190925210718-83518d96ae4f github.com/terraform-providers/terraform-provider-vsphere v1.16.2 github.com/ulikunitz/xz v0.5.6 github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50 github.com/vmware/govmomi v0.22.2 github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect - golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 + golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a golang.org/x/lint v0.0.0-20200302205851-738671d3881b golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sys v0.0.0-20200331124033-c3d80250170d diff --git a/go.sum b/go.sum index a2bf84e2289..12355aab51b 100644 --- a/go.sum +++ b/go.sum @@ -232,6 +232,7 @@ github.com/aws/aws-sdk-go v1.15.78/go.mod 
h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3A github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.19.39/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.22.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.47/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= @@ -279,7 +280,6 @@ github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQ github.com/brancz/gojsontoyaml v0.0.0-20190425155809-e8bd32d46b3d/go.mod h1:IyUJYN1gvWjtLF5ZuygmxbnsAyP3aJS6cHzIuZY50B0= github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f/go.mod h1:HQhVmdUf7dBNwIIdBTivnCDxcf6IZY3/zrb+uKSJz6Y= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/bsm/go-vlq v0.0.0-20150828105119-ec6e8d4f5f4e/go.mod h1:N+BjUcTjSxc2mtRGSCPsat1kze3CUtvJN3/jTXlp29k= github.com/btubbs/datetime v0.1.0/go.mod h1:n2BZ/2ltnRzNiz27aE3wUb2onNttQdC+WFxAoks5jJM= github.com/btubbs/datetime v0.1.1 h1:KuV+F9tyq/hEnezmKZNGk8dzqMVsId6EpFVrQCfA3To= github.com/btubbs/datetime v0.1.1/go.mod h1:n2BZ/2ltnRzNiz27aE3wUb2onNttQdC+WFxAoks5jJM= @@ -903,7 +903,6 @@ github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoP github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-azure-helpers v0.4.1/go.mod h1:lu62V//auUow6k0IykxLK2DCNW8qTmpm8KqhYVWattA= @@ -915,12 +914,10 @@ github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVo github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMCsmBl4Omu/2t4= github.com/hashicorp/go-gcp-common v0.5.0/go.mod h1:IDGUI2N/OS3PiU4qZcXJeWKPI6O/9Y8hOrbSiMcqyYw= -github.com/hashicorp/go-getter v1.1.0/go.mod h1:q+PoBhh16brIKwJS9kt18jEtXHTg2EGkmrA9P7HVS+U= github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02 h1:l1KB3bHVdvegcIf5upQ5mjcHjs2qsWnKh4Yr9xgIuu8= github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.7.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.2/go.mod 
h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= @@ -932,12 +929,10 @@ github.com/hashicorp/go-memdb v1.0.2/go.mod h1:I6dKdmYhZqU0RJSheVEWgTNWdVQH5QvTg github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-plugin v0.0.0-20190220160451-3f118e8ee104/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE= github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= @@ -950,6 +945,8 @@ github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= github.com/hashicorp/go-retryablehttp v0.6.4 h1:BbgctKO892xEyOXnGiaAwIoSq1QZ/SS4AhjoAh9DnfY= github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= @@ -986,7 +983,6 @@ github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV github.com/hashicorp/hcl/v2 v2.1.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE= github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= -github.com/hashicorp/hcl2 v0.0.0-20190226234159-7e26f2f34612/go.mod h1:HtEzazM5AZ9fviNEof8QZB4T1Vz9UhHrGhnMPzl//Ek= github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= github.com/hashicorp/hil v0.0.0-20190212132231-97b3a9cdfa93 h1:T1Q6ag9tCwun16AW+XK3tAql24P4uTGUMIn1/92WsQQ= github.com/hashicorp/hil v0.0.0-20190212132231-97b3a9cdfa93/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= @@ -1518,9 +1514,8 @@ github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOTh github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db h1:9uViuKtx1jrlXLBW/pMnhOfzn3iSEdLase/But/IZRU= github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= -github.com/packethost/packngo 
v0.1.1-0.20190410075950-a02c426e4888/go.mod h1:RQHg5xR1F614BwJyepfMqrKN+32IH0i7yX+ey43rEeQ= -github.com/packethost/packngo v0.2.0 h1:mSlzOof8PsOWCy78sBMt/PwMJTEjjQ/rRvMixu4Nm6c= -github.com/packethost/packngo v0.2.0/go.mod h1:RQHg5xR1F614BwJyepfMqrKN+32IH0i7yX+ey43rEeQ= +github.com/packethost/packngo v0.2.1-0.20200424110205-36917dbc292f h1:9JqXevAco/bD2ldBRC33NfFIDRZ3Xn1n40M/LS7/vDw= +github.com/packethost/packngo v0.2.1-0.20200424110205-36917dbc292f/go.mod h1:erURcsqYzwc9wSb04TX4so+s6F3uZtbXUil0W1LCGHA= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= @@ -1792,8 +1787,8 @@ github.com/terraform-providers/terraform-provider-null v1.0.1-0.20191204185112-e github.com/terraform-providers/terraform-provider-null v1.0.1-0.20191204185112-e5c592237f62/go.mod h1:RaAgicYv+oKLyZpaQB5BOkwL/t5WKYHQ+Q0kgMgXgR4= github.com/terraform-providers/terraform-provider-openstack v1.28.0 h1:yiT3Z5fDkJt0YX5BDkX/+0uwGpX/uNjVsuYqFIJ/kL0= github.com/terraform-providers/terraform-provider-openstack v1.28.0/go.mod h1:MxR5egxGj9OfPTj0VorSjpIVAi3OT24jOMiCBH/d7hU= -github.com/terraform-providers/terraform-provider-packet v1.7.2 h1:hYN7YsuR9dp4P/MPRRnh5m5J1/tw53BbXiLDlsDTWw4= -github.com/terraform-providers/terraform-provider-packet v1.7.2/go.mod h1:/k5o0Y30me0844mFLk5hM0TnP7OM3v1FC1hu7ZTTrNM= +github.com/terraform-providers/terraform-provider-packet v1.7.3-0.20200512085448-9717adf77547 h1:uuCEByGlsot8OXPt1jSyshw+p028cgNlvxOzFYPAm74= +github.com/terraform-providers/terraform-provider-packet v1.7.3-0.20200512085448-9717adf77547/go.mod h1:0/qYtOwKyCvOvLpyYUhk+mR20H7OW+gcukNcPaM/0aA= github.com/terraform-providers/terraform-provider-random v0.0.0-20190925200408-30dac3233094/go.mod h1:F4KE9YftuJyMiBth4W1kCrsyOHndtTjAmZ+ZzjqWY+4= github.com/terraform-providers/terraform-provider-random v1.3.2-0.20190925210718-83518d96ae4f h1:oqZwtMD9/XcOcCzm/9cz8+pQWRTGF60N1RNcYLg+BCw= github.com/terraform-providers/terraform-provider-random v1.3.2-0.20190925210718-83518d96ae4f/go.mod h1:F4KE9YftuJyMiBth4W1kCrsyOHndtTjAmZ+ZzjqWY+4= @@ -1894,8 +1889,6 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -github.com/zclconf/go-cty v0.0.0-20190124225737-a385d646c1e9/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -github.com/zclconf/go-cty v0.0.0-20190212192503-19dda139b164/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.1.1/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= @@ -1960,7 +1953,6 @@ golang.org/x/build v0.0.0-20190927031335-2835ba2e683f/go.mod h1:fYw7AShPAhGMdXqA golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180816225734-aabede6cba87/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1972,7 +1964,6 @@ golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190228050851-31a38585487a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1999,6 +1990,8 @@ golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a h1:y6sBfNd1b9Wy08a6K1Z1DZc4aXABUN5TKjkYhz7UKmo= +golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2059,13 +2052,11 @@ golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181129055619-fae4c4e3ad76/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190301231341-16b79f2e4e95/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -2140,7 +2131,6 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190122071731-054c452bb702/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2186,6 +2176,7 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191002091554-b397fe3ad8ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191029155521-f43be2a4598c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/pkg/asset/cluster/metadata.go b/pkg/asset/cluster/metadata.go index 3a3971a751c..4c6b7dade7a 100644 --- a/pkg/asset/cluster/metadata.go +++ b/pkg/asset/cluster/metadata.go @@ -15,6 +15,7 @@ import ( "github.com/openshift/installer/pkg/asset/cluster/libvirt" "github.com/openshift/installer/pkg/asset/cluster/openstack" "github.com/openshift/installer/pkg/asset/cluster/ovirt" + "github.com/openshift/installer/pkg/asset/cluster/packet" "github.com/openshift/installer/pkg/asset/cluster/vsphere" "github.com/openshift/installer/pkg/asset/installconfig" "github.com/openshift/installer/pkg/types" @@ -26,6 +27,7 @@ import ( nonetypes "github.com/openshift/installer/pkg/types/none" openstacktypes "github.com/openshift/installer/pkg/types/openstack" ovirttypes "github.com/openshift/installer/pkg/types/ovirt" + packettypes "github.com/openshift/installer/pkg/types/packet" vspheretypes "github.com/openshift/installer/pkg/types/vsphere" ) @@ -83,6 +85,8 @@ func (m *Metadata) Generate(parents asset.Parents) (err error) { metadata.ClusterPlatformMetadata.Ovirt = ovirt.Metadata(installConfig.Config) case vspheretypes.Name: metadata.ClusterPlatformMetadata.VSphere = vsphere.Metadata(installConfig.Config) + case packettypes.Name: + metadata.ClusterPlatformMetadata.Packet = packet.Metadata(installConfig.Config) case nonetypes.Name: default: return errors.Errorf("no known platform") diff --git 
a/pkg/asset/cluster/packet/packet.go b/pkg/asset/cluster/packet/packet.go index 559b3b7f094..1c6accc50fc 100644 --- a/pkg/asset/cluster/packet/packet.go +++ b/pkg/asset/cluster/packet/packet.go @@ -6,8 +6,11 @@ import ( "github.com/openshift/installer/pkg/types/packet" ) -// Metadata converts an install configuration to ovirt metadata. +// Metadata converts an install configuration to Packet metadata. func Metadata(config *types.InstallConfig) *packet.Metadata { - m := packet.Metadata{} + m := packet.Metadata{ + FacilityCode: config.Platform.Packet.FacilityCode, + ProjectID: config.Platform.Packet.ProjectID, + } return &m } diff --git a/pkg/asset/ignition/machine/node.go b/pkg/asset/ignition/machine/node.go index 8f500bc7e94..768d2ec9b83 100644 --- a/pkg/asset/ignition/machine/node.go +++ b/pkg/asset/ignition/machine/node.go @@ -12,6 +12,7 @@ import ( baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" openstacktypes "github.com/openshift/installer/pkg/types/openstack" ovirttypes "github.com/openshift/installer/pkg/types/ovirt" + packettypes "github.com/openshift/installer/pkg/types/packet" vspheretypes "github.com/openshift/installer/pkg/types/vsphere" ) @@ -35,6 +36,11 @@ func pointerIgnitionConfig(installConfig *types.InstallConfig, rootCA []byte, ro if installConfig.VSphere.APIVIP != "" { ignitionHost = net.JoinHostPort(installConfig.VSphere.APIVIP, "22623") } + case packettypes.Name: + if installConfig.Packet.APIVIP != "" { + ignitionHost = net.JoinHostPort(installConfig.Packet.APIVIP, "22623") + + } } return &ignition.Config{ Ignition: ignition.Ignition{ diff --git a/pkg/asset/installconfig/installconfig.go b/pkg/asset/installconfig/installconfig.go index 4175fbb0f6d..a6e750990d4 100644 --- a/pkg/asset/installconfig/installconfig.go +++ b/pkg/asset/installconfig/installconfig.go @@ -188,6 +188,7 @@ func (a *InstallConfig) platformValidation() error { } if a.Config.Platform.OpenStack != nil { return icopenstack.Validate(a.Config) + } if a.Config.Platform.Packet != nil { return icpacket.Validate(a.Config) } diff --git a/pkg/types/packet/metadata.go b/pkg/types/packet/metadata.go index 7475843bede..9805b8cc9dd 100644 --- a/pkg/types/packet/metadata.go +++ b/pkg/types/packet/metadata.go @@ -2,4 +2,9 @@ package packet // Metadata contains packet metadata (e.g. for uninstalling the cluster). 
type Metadata struct { + // FacilityCode represents the Packet region and datacenter where your devices will be provisioned (https://www.packet.com/developers/docs/getting-started/facilities/) + FacilityCode string `json:"facility_code,omitempty"` + + // ProjectID represents the Packet project used for logical grouping and invoicing (https://www.packet.com/developers/docs/API/getting-started/) + ProjectID string `json:"project_id,omitempty"` } diff --git a/pkg/types/packet/platform.go b/pkg/types/packet/platform.go index 4ee92d5c758..954de21a281 100644 --- a/pkg/types/packet/platform.go +++ b/pkg/types/packet/platform.go @@ -6,4 +6,47 @@ type Platform struct { // ProjectID represents the Packet project used for logical grouping and invoicing (https://www.packet.com/developers/docs/API/getting-started/) ProjectID string `json:"project_id,omitempty"` + + // APIVIP is the static IP on the nodes subnet that the API port for + // OpenShift will be assigned. + // Default: the fifth IP of the first entry in the machineNetwork + // CIDR + // +optional + // +kubebuilder:validation:Format=ip + APIVIP string `json:"apivip,omitempty"` + + // DefaultMachinePlatform is the default configuration used when + // installing on Packet for machine pools which do not define their own + // platform configuration. + // +optional + DefaultMachinePlatform *MachinePool `json:"defaultMachinePlatform,omitempty"` + + // Network specifies an existing VPC where the cluster should be created + // rather than provisioning a new one. + // +optional + Network string `json:"network,omitempty"` + + // ControlPlaneSubnet is an existing subnet where the control plane will be deployed. + // The value should be the name of the subnet. + // +optional + ControlPlaneSubnet string `json:"controlPlaneSubnet,omitempty"` + + // ComputeSubnet is an existing subnet where the compute nodes will be deployed. + // The value should be the name of the subnet. + // +optional + ComputeSubnet string `json:"computeSubnet,omitempty"` + + // BootstrapOSImage is a URL to override the default OS image + // for the bootstrap node. The URL must contain a sha256 hash of the image + // e.g. https://mirror.example.com/images/qemu.qcow2.gz?sha256=a07bd... + // + // +optional + BootstrapOSImage string `json:"bootstrapOSImage,omitempty" validate:"omitempty,osimageuri,urlexist"` + + // ClusterOSImage is a URL to override the default OS image + // for cluster nodes. The URL must contain a sha256 hash of the image + // e.g. https://mirror.example.com/images/metal.qcow2.gz?sha256=3b5a8... + // + // +optional + ClusterOSImage string `json:"clusterOSImage,omitempty" validate:"omitempty,osimageuri,urlexist"` } diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md index b8cc459bf7b..30357c75668 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/README.md +++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md @@ -44,5 +44,18 @@ The returned response object is an `*http.Response`, the same thing you would usually get from `net/http`. Had the request failed one or more times, the above call would block and retry with exponential backoff. +## Getting a stdlib `*http.Client` with retries + +It's possible to convert a `*retryablehttp.Client` directly to a `*http.Client`. +This makes use of retryablehttp broadly applicable with minimal effort.
Simply +configure a `*retryablehttp.Client` as you wish, and then call `StandardClient()`: + +```go +retryClient := retryablehttp.NewClient() +retryClient.RetryMax = 10 + +standardClient := retryClient.StandardClient() // *http.Client +``` + For more usage and examples see the [godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp). diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go index 7bfa75933e1..f1ccd3df35c 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -1,4 +1,4 @@ -// The retryablehttp package provides a familiar HTTP client interface with +// Package retryablehttp provides a familiar HTTP client interface with // automatic retries and exponential backoff. It is a thin wrapper over the // standard net/http client library and exposes nearly the same public API. // This makes retryablehttp very easy to drop into existing programs. @@ -119,95 +119,127 @@ func (r *Request) BodyBytes() ([]byte, error) { return buf.Bytes(), nil } +// SetBody allows setting the request body. +// +// It is useful if a new body needs to be set without constructing a new Request. +func (r *Request) SetBody(rawBody interface{}) error { + bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + if err != nil { + return err + } + r.body = bodyReader + r.ContentLength = contentLength + return nil +} + +// WriteTo allows copying the request body into a writer. +// +// It writes data to w until there's no more data to write or +// when an error occurs. The return int64 value is the number of bytes +// written. Any error encountered during the write is also returned. +// The signature matches io.WriterTo interface. +func (r *Request) WriteTo(w io.Writer) (int64, error) { + body, err := r.body() + if err != nil { + return 0, err + } + if c, ok := body.(io.Closer); ok { + defer c.Close() + } + return io.Copy(w, body) +} + func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, error) { var bodyReader ReaderFunc var contentLength int64 - if rawBody != nil { - switch body := rawBody.(type) { - // If they gave us a function already, great! Use it. - case ReaderFunc: - bodyReader = body - tmp, err := body() - if err != nil { - return nil, 0, err - } - if lr, ok := tmp.(LenReader); ok { - contentLength = int64(lr.Len()) - } - if c, ok := tmp.(io.Closer); ok { - c.Close() - } - - case func() (io.Reader, error): - bodyReader = body - tmp, err := body() - if err != nil { - return nil, 0, err - } - if lr, ok := tmp.(LenReader); ok { - contentLength = int64(lr.Len()) - } - if c, ok := tmp.(io.Closer); ok { - c.Close() - } + switch body := rawBody.(type) { + // If they gave us a function already, great! Use it. 
+ case ReaderFunc: + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } - // If a regular byte slice, we can read it over and over via new - // readers - case []byte: - buf := body - bodyReader = func() (io.Reader, error) { - return bytes.NewReader(buf), nil - } - contentLength = int64(len(buf)) - - // If a bytes.Buffer we can read the underlying byte slice over and - // over - case *bytes.Buffer: - buf := body - bodyReader = func() (io.Reader, error) { - return bytes.NewReader(buf.Bytes()), nil - } - contentLength = int64(buf.Len()) + case func() (io.Reader, error): + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } - // We prioritize *bytes.Reader here because we don't really want to - // deal with it seeking so want it to match here instead of the - // io.ReadSeeker case. - case *bytes.Reader: - buf, err := ioutil.ReadAll(body) - if err != nil { - return nil, 0, err - } - bodyReader = func() (io.Reader, error) { - return bytes.NewReader(buf), nil - } - contentLength = int64(len(buf)) - - // Compat case - case io.ReadSeeker: - raw := body - bodyReader = func() (io.Reader, error) { - _, err := raw.Seek(0, 0) - return ioutil.NopCloser(raw), err - } - if lr, ok := raw.(LenReader); ok { - contentLength = int64(lr.Len()) - } + // If a regular byte slice, we can read it over and over via new + // readers + case []byte: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // If a bytes.Buffer we can read the underlying byte slice over and + // over + case *bytes.Buffer: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf.Bytes()), nil + } + contentLength = int64(buf.Len()) - // Read all in so we can reset - case io.Reader: - buf, err := ioutil.ReadAll(body) - if err != nil { - return nil, 0, err - } - bodyReader = func() (io.Reader, error) { - return bytes.NewReader(buf), nil - } - contentLength = int64(len(buf)) + // We prioritize *bytes.Reader here because we don't really want to + // deal with it seeking so want it to match here instead of the + // io.ReadSeeker case. 
+ case *bytes.Reader: + buf, err := ioutil.ReadAll(body) + if err != nil { + return nil, 0, err + } + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // Compat case + case io.ReadSeeker: + raw := body + bodyReader = func() (io.Reader, error) { + _, err := raw.Seek(0, 0) + return ioutil.NopCloser(raw), err + } + if lr, ok := raw.(LenReader); ok { + contentLength = int64(lr.Len()) + } - default: - return nil, 0, fmt.Errorf("cannot handle type %T", rawBody) + // Read all in so we can reset + case io.Reader: + buf, err := ioutil.ReadAll(body) + if err != nil { + return nil, 0, err } + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // No body provided, nothing to do + case nil: + + // Unrecognized type + default: + return nil, 0, fmt.Errorf("cannot handle type %T", rawBody) } return bodyReader, contentLength, nil } @@ -415,7 +447,7 @@ func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) // perform linear backoff based on the attempt number and with jitter to // prevent a thundering herd. // -// min and max here are *not* absolute values. The number to be multipled by +// min and max here are *not* absolute values. The number to be multiplied by // the attempt number will be chosen at random from between them, thus they are // bounding the jitter. // @@ -545,7 +577,7 @@ func (c *Client) Do(req *Request) (*http.Response, error) { return resp, err } - // We do this before drainBody beause there's no need for the I/O if + // We do this before drainBody because there's no need for the I/O if // we're breaking out remain := c.RetryMax - i if remain <= 0 { @@ -663,3 +695,11 @@ func PostForm(url string, data url.Values) (*http.Response, error) { func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) } + +// StandardClient returns a stdlib *http.Client with a custom Transport, which +// shims in a *retryablehttp.Client for added retries. +func (c *Client) StandardClient() *http.Client { + return &http.Client{ + Transport: &RoundTripper{Client: c}, + } +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go new file mode 100644 index 00000000000..b841b4cfe53 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go @@ -0,0 +1,43 @@ +package retryablehttp + +import ( + "net/http" + "sync" +) + +// RoundTripper implements the http.RoundTripper interface, using a retrying +// HTTP client to execute requests. +// +// It is important to note that retryablehttp doesn't always act exactly as a +// RoundTripper should. This is highly dependent on the retryable client's +// configuration. +type RoundTripper struct { + // The client to use during requests. If nil, the default retryablehttp + // client and settings will be used. + Client *Client + + // once ensures that the logic to initialize the default client runs at + // most once, in a single thread. + once sync.Once +} + +// init initializes the underlying retryable client. +func (rt *RoundTripper) init() { + if rt.Client == nil { + rt.Client = NewClient() + } +} + +// RoundTrip satisfies the http.RoundTripper interface. +func (rt *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + rt.once.Do(rt.init) + + // Convert the request to be retryable. 
+ retryableReq, err := FromRequest(req) + if err != nil { + return nil, err + } + + // Execute the request. + return rt.Client.Do(retryableReq) +} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/expand.go b/vendor/github.com/hashicorp/terraform/flatmap/expand.go deleted file mode 100644 index b9d15461e0a..00000000000 --- a/vendor/github.com/hashicorp/terraform/flatmap/expand.go +++ /dev/null @@ -1,152 +0,0 @@ -package flatmap - -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/terraform/configs/hcl2shim" -) - -// Expand takes a map and a key (prefix) and expands that value into -// a more complex structure. This is the reverse of the Flatten operation. -func Expand(m map[string]string, key string) interface{} { - // If the key is exactly a key in the map, just return it - if v, ok := m[key]; ok { - if v == "true" { - return true - } else if v == "false" { - return false - } - - return v - } - - // Check if the key is an array, and if so, expand the array - if v, ok := m[key+".#"]; ok { - // If the count of the key is unknown, then just put the unknown - // value in the value itself. This will be detected by Terraform - // core later. - if v == hcl2shim.UnknownVariableValue { - return v - } - - return expandArray(m, key) - } - - // Check if this is a prefix in the map - prefix := key + "." - for k := range m { - if strings.HasPrefix(k, prefix) { - return expandMap(m, prefix) - } - } - - return nil -} - -func expandArray(m map[string]string, prefix string) []interface{} { - num, err := strconv.ParseInt(m[prefix+".#"], 0, 0) - if err != nil { - panic(err) - } - - // If the number of elements in this array is 0, then return an - // empty slice as there is nothing to expand. Trying to expand it - // anyway could lead to crashes as any child maps, arrays or sets - // that no longer exist are still shown as empty with a count of 0. - if num == 0 { - return []interface{}{} - } - - // NOTE: "num" is not necessarily accurate, e.g. if a user tampers - // with state, so the following code should not crash when given a - // number of items more or less than what's given in num. The - // num key is mainly just a hint that this is a list or set. - - // The Schema "Set" type stores its values in an array format, but - // using numeric hash values instead of ordinal keys. Take the set - // of keys regardless of value, and expand them in numeric order. - // See GH-11042 for more details. - keySet := map[int]bool{} - computed := map[string]bool{} - for k := range m { - if !strings.HasPrefix(k, prefix+".") { - continue - } - - key := k[len(prefix)+1:] - idx := strings.Index(key, ".") - if idx != -1 { - key = key[:idx] - } - - // skip the count value - if key == "#" { - continue - } - - // strip the computed flag if there is one - if strings.HasPrefix(key, "~") { - key = key[1:] - computed[key] = true - } - - k, err := strconv.Atoi(key) - if err != nil { - panic(err) - } - keySet[int(k)] = true - } - - keysList := make([]int, 0, num) - for key := range keySet { - keysList = append(keysList, key) - } - sort.Ints(keysList) - - result := make([]interface{}, len(keysList)) - for i, key := range keysList { - keyString := strconv.Itoa(key) - if computed[keyString] { - keyString = "~" + keyString - } - result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString)) - } - - return result -} - -func expandMap(m map[string]string, prefix string) map[string]interface{} { - // Submaps may not have a '%' key, so we can't count on this value being - // here. 
If we don't have a count, just proceed as if we have have a map. - if count, ok := m[prefix+"%"]; ok && count == "0" { - return map[string]interface{}{} - } - - result := make(map[string]interface{}) - for k := range m { - if !strings.HasPrefix(k, prefix) { - continue - } - - key := k[len(prefix):] - idx := strings.Index(key, ".") - if idx != -1 { - key = key[:idx] - } - if _, ok := result[key]; ok { - continue - } - - // skip the map count value - if key == "%" { - continue - } - - result[key] = Expand(m, k[:len(prefix)+len(key)]) - } - - return result -} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go deleted file mode 100644 index 9ff6e426526..00000000000 --- a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go +++ /dev/null @@ -1,71 +0,0 @@ -package flatmap - -import ( - "fmt" - "reflect" -) - -// Flatten takes a structure and turns into a flat map[string]string. -// -// Within the "thing" parameter, only primitive values are allowed. Structs are -// not supported. Therefore, it can only be slices, maps, primitives, and -// any combination of those together. -// -// See the tests for examples of what inputs are turned into. -func Flatten(thing map[string]interface{}) Map { - result := make(map[string]string) - - for k, raw := range thing { - flatten(result, k, reflect.ValueOf(raw)) - } - - return Map(result) -} - -func flatten(result map[string]string, prefix string, v reflect.Value) { - if v.Kind() == reflect.Interface { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Bool: - if v.Bool() { - result[prefix] = "true" - } else { - result[prefix] = "false" - } - case reflect.Int: - result[prefix] = fmt.Sprintf("%d", v.Int()) - case reflect.Map: - flattenMap(result, prefix, v) - case reflect.Slice: - flattenSlice(result, prefix, v) - case reflect.String: - result[prefix] = v.String() - default: - panic(fmt.Sprintf("Unknown: %s", v)) - } -} - -func flattenMap(result map[string]string, prefix string, v reflect.Value) { - for _, k := range v.MapKeys() { - if k.Kind() == reflect.Interface { - k = k.Elem() - } - - if k.Kind() != reflect.String { - panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) - } - - flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) - } -} - -func flattenSlice(result map[string]string, prefix string, v reflect.Value) { - prefix = prefix + "." - - result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) - for i := 0; i < v.Len(); i++ { - flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) - } -} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/map.go b/vendor/github.com/hashicorp/terraform/flatmap/map.go deleted file mode 100644 index 46b72c4014a..00000000000 --- a/vendor/github.com/hashicorp/terraform/flatmap/map.go +++ /dev/null @@ -1,82 +0,0 @@ -package flatmap - -import ( - "strings" -) - -// Map is a wrapper around map[string]string that provides some helpers -// above it that assume the map is in the format that flatmap expects -// (the result of Flatten). -// -// All modifying functions such as Delete are done in-place unless -// otherwise noted. -type Map map[string]string - -// Contains returns true if the map contains the given key. -func (m Map) Contains(key string) bool { - for _, k := range m.Keys() { - if k == key { - return true - } - } - - return false -} - -// Delete deletes a key out of the map with the given prefix. 
-func (m Map) Delete(prefix string) { - for k, _ := range m { - match := k == prefix - if !match { - if !strings.HasPrefix(k, prefix) { - continue - } - - if k[len(prefix):len(prefix)+1] != "." { - continue - } - } - - delete(m, k) - } -} - -// Keys returns all of the top-level keys in this map -func (m Map) Keys() []string { - ks := make(map[string]struct{}) - for k, _ := range m { - idx := strings.Index(k, ".") - if idx == -1 { - idx = len(k) - } - - ks[k[:idx]] = struct{}{} - } - - result := make([]string, 0, len(ks)) - for k, _ := range ks { - result = append(result, k) - } - - return result -} - -// Merge merges the contents of the other Map into this one. -// -// This merge is smarter than a simple map iteration because it -// will fully replace arrays and other complex structures that -// are present in this map with the other map's. For example, if -// this map has a 3 element "foo" list, and m2 has a 2 element "foo" -// list, then the result will be that m has a 2 element "foo" -// list. -func (m Map) Merge(m2 Map) { - for _, prefix := range m2.Keys() { - m.Delete(prefix) - - for k, v := range m2 { - if strings.HasPrefix(k, prefix) { - m[k] = v - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/config/decode.go b/vendor/github.com/hashicorp/terraform/helper/config/decode.go deleted file mode 100644 index f470c9b4bee..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/config/decode.go +++ /dev/null @@ -1,28 +0,0 @@ -package config - -import ( - "github.com/mitchellh/mapstructure" -) - -func Decode(target interface{}, raws ...interface{}) (*mapstructure.Metadata, error) { - var md mapstructure.Metadata - decoderConfig := &mapstructure.DecoderConfig{ - Metadata: &md, - Result: target, - WeaklyTypedInput: true, - } - - decoder, err := mapstructure.NewDecoder(decoderConfig) - if err != nil { - return nil, err - } - - for _, raw := range raws { - err := decoder.Decode(raw) - if err != nil { - return nil, err - } - } - - return &md, nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/config/validator.go b/vendor/github.com/hashicorp/terraform/helper/config/validator.go deleted file mode 100644 index 1a6e023b606..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/config/validator.go +++ /dev/null @@ -1,214 +0,0 @@ -package config - -import ( - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/terraform/flatmap" - "github.com/hashicorp/terraform/terraform" -) - -// Validator is a helper that helps you validate the configuration -// of your resource, resource provider, etc. -// -// At the most basic level, set the Required and Optional lists to be -// specifiers of keys that are required or optional. If a key shows up -// that isn't in one of these two lists, then an error is generated. -// -// The "specifiers" allowed in this is a fairly rich syntax to help -// describe the format of your configuration: -// -// * Basic keys are just strings. For example: "foo" will match the -// "foo" key. -// -// * Nested structure keys can be matched by doing -// "listener.*.foo". This will verify that there is at least one -// listener element that has the "foo" key set. -// -// * The existence of a nested structure can be checked by simply -// doing "listener.*" which will verify that there is at least -// one element in the "listener" structure. This is NOT -// validating that "listener" is an array. It is validating -// that it is a nested structure in the configuration. 
-// -type Validator struct { - Required []string - Optional []string -} - -func (v *Validator) Validate( - c *terraform.ResourceConfig) (ws []string, es []error) { - // Flatten the configuration so it is easier to reason about - flat := flatmap.Flatten(c.Raw) - - keySet := make(map[string]validatorKey) - for i, vs := range [][]string{v.Required, v.Optional} { - req := i == 0 - for _, k := range vs { - vk, err := newValidatorKey(k, req) - if err != nil { - es = append(es, err) - continue - } - - keySet[k] = vk - } - } - - purged := make([]string, 0) - for _, kv := range keySet { - p, w, e := kv.Validate(flat) - if len(w) > 0 { - ws = append(ws, w...) - } - if len(e) > 0 { - es = append(es, e...) - } - - purged = append(purged, p...) - } - - // Delete all the keys we processed in order to find - // the unknown keys. - for _, p := range purged { - delete(flat, p) - } - - // The rest are unknown - for k, _ := range flat { - es = append(es, fmt.Errorf("Unknown configuration: %s", k)) - } - - return -} - -type validatorKey interface { - // Validate validates the given configuration and returns viewed keys, - // warnings, and errors. - Validate(map[string]string) ([]string, []string, []error) -} - -func newValidatorKey(k string, req bool) (validatorKey, error) { - var result validatorKey - - parts := strings.Split(k, ".") - if len(parts) > 1 && parts[1] == "*" { - result = &nestedValidatorKey{ - Parts: parts, - Required: req, - } - } else { - result = &basicValidatorKey{ - Key: k, - Required: req, - } - } - - return result, nil -} - -// basicValidatorKey validates keys that are basic such as "foo" -type basicValidatorKey struct { - Key string - Required bool -} - -func (v *basicValidatorKey) Validate( - m map[string]string) ([]string, []string, []error) { - for k, _ := range m { - // If we have the exact key its a match - if k == v.Key { - return []string{k}, nil, nil - } - } - - if !v.Required { - return nil, nil, nil - } - - return nil, nil, []error{fmt.Errorf( - "Key not found: %s", v.Key)} -} - -type nestedValidatorKey struct { - Parts []string - Required bool -} - -func (v *nestedValidatorKey) validate( - m map[string]string, - prefix string, - offset int) ([]string, []string, []error) { - if offset >= len(v.Parts) { - // We're at the end. Look for a specific key. - v2 := &basicValidatorKey{Key: prefix, Required: v.Required} - return v2.Validate(m) - } - - current := v.Parts[offset] - - // If we're at offset 0, special case to start at the next one. - if offset == 0 { - return v.validate(m, current, offset+1) - } - - // Determine if we're doing a "for all" or a specific key - if current != "*" { - // We're looking at a specific key, continue on. - return v.validate(m, prefix+"."+current, offset+1) - } - - // We're doing a "for all", so we loop over. - countStr, ok := m[prefix+".#"] - if !ok { - if !v.Required { - // It wasn't required, so its no problem. 
- return nil, nil, nil - } - - return nil, nil, []error{fmt.Errorf( - "Key not found: %s", prefix)} - } - - count, err := strconv.ParseInt(countStr, 0, 0) - if err != nil { - // This shouldn't happen if flatmap works properly - panic("invalid flatmap array") - } - - var e []error - var w []string - u := make([]string, 1, count+1) - u[0] = prefix + ".#" - for i := 0; i < int(count); i++ { - prefix := fmt.Sprintf("%s.%d", prefix, i) - - // Mark that we saw this specific key - u = append(u, prefix) - - // Mark all prefixes of this - for k, _ := range m { - if !strings.HasPrefix(k, prefix+".") { - continue - } - u = append(u, k) - } - - // If we have more parts, then validate deeper - if offset+1 < len(v.Parts) { - u2, w2, e2 := v.validate(m, prefix, offset+1) - - u = append(u, u2...) - w = append(w, w2...) - e = append(e, e2...) - } - } - - return u, w, e -} - -func (v *nestedValidatorKey) Validate( - m map[string]string) ([]string, []string, []error) { - return v.validate(m, "", 0) -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/error.go b/vendor/github.com/hashicorp/terraform/helper/resource/error.go deleted file mode 100644 index 7ee21614b9f..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/error.go +++ /dev/null @@ -1,79 +0,0 @@ -package resource - -import ( - "fmt" - "strings" - "time" -) - -type NotFoundError struct { - LastError error - LastRequest interface{} - LastResponse interface{} - Message string - Retries int -} - -func (e *NotFoundError) Error() string { - if e.Message != "" { - return e.Message - } - - if e.Retries > 0 { - return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries) - } - - return "couldn't find resource" -} - -// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending -type UnexpectedStateError struct { - LastError error - State string - ExpectedState []string -} - -func (e *UnexpectedStateError) Error() string { - return fmt.Sprintf( - "unexpected state '%s', wanted target '%s'. 
last error: %s", - e.State, - strings.Join(e.ExpectedState, ", "), - e.LastError, - ) -} - -// TimeoutError is returned when WaitForState times out -type TimeoutError struct { - LastError error - LastState string - Timeout time.Duration - ExpectedState []string -} - -func (e *TimeoutError) Error() string { - expectedState := "resource to be gone" - if len(e.ExpectedState) > 0 { - expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", ")) - } - - extraInfo := make([]string, 0) - if e.LastState != "" { - extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState)) - } - if e.Timeout > 0 { - extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String())) - } - - suffix := "" - if len(extraInfo) > 0 { - suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", ")) - } - - if e.LastError != nil { - return fmt.Sprintf("timeout while waiting for %s%s: %s", - expectedState, suffix, e.LastError) - } - - return fmt.Sprintf("timeout while waiting for %s%s", - expectedState, suffix) -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go b/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go deleted file mode 100644 index f8dcd124af0..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go +++ /dev/null @@ -1,43 +0,0 @@ -package resource - -import ( - "context" - "net" - "time" - - proto "github.com/hashicorp/terraform-plugin-sdk/tfplugin5" - "github.com/hashicorp/terraform/helper/plugin" - tfplugin "github.com/hashicorp/terraform/plugin" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/grpc" - "google.golang.org/grpc/test/bufconn" -) - -// GRPCTestProvider takes a legacy ResourceProvider, wraps it in the new GRPC -// shim and starts it in a grpc server using an inmem connection. It returns a -// GRPCClient for this new server to test the shimmed resource provider. -func GRPCTestProvider(rp terraform.ResourceProvider) providers.Interface { - listener := bufconn.Listen(256 * 1024) - grpcServer := grpc.NewServer() - - p := plugin.NewGRPCProviderServerShim(rp) - proto.RegisterProviderServer(grpcServer, p) - - go grpcServer.Serve(listener) - - conn, err := grpc.Dial("", grpc.WithDialer(func(string, time.Duration) (net.Conn, error) { - return listener.Dial() - }), grpc.WithInsecure()) - if err != nil { - panic(err) - } - - var pp tfplugin.GRPCProviderPlugin - client, _ := pp.GRPCClient(context.Background(), nil, conn) - - grpcClient := client.(*tfplugin.GRPCProvider) - grpcClient.TestServer = grpcServer - - return grpcClient -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/id.go b/vendor/github.com/hashicorp/terraform/helper/resource/id.go deleted file mode 100644 index 44949550e73..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/id.go +++ /dev/null @@ -1,45 +0,0 @@ -package resource - -import ( - "fmt" - "strings" - "sync" - "time" -) - -const UniqueIdPrefix = `terraform-` - -// idCounter is a monotonic counter for generating ordered unique ids. -var idMutex sync.Mutex -var idCounter uint32 - -// Helper for a resource to generate a unique identifier w/ default prefix -func UniqueId() string { - return PrefixedUniqueId(UniqueIdPrefix) -} - -// UniqueIDSuffixLength is the string length of the suffix generated by -// PrefixedUniqueId. This can be used by length validation functions to -// ensure prefixes are the correct length for the target field. 
-const UniqueIDSuffixLength = 26 - -// Helper for a resource to generate a unique identifier w/ given prefix -// -// After the prefix, the ID consists of an incrementing 26 digit value (to match -// previous timestamp output). After the prefix, the ID consists of a timestamp -// and an incrementing 8 hex digit value The timestamp means that multiple IDs -// created with the same prefix will sort in the order of their creation, even -// across multiple terraform executions, as long as the clock is not turned back -// between calls, and as long as any given terraform execution generates fewer -// than 4 billion IDs. -func PrefixedUniqueId(prefix string) string { - // Be precise to 4 digits of fractional seconds, but remove the dot before the - // fractional seconds. - timestamp := strings.Replace( - time.Now().UTC().Format("20060102150405.0000"), ".", "", 1) - - idMutex.Lock() - defer idMutex.Unlock() - idCounter++ - return fmt.Sprintf("%s%s%08x", prefix, timestamp, idCounter) -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/map.go b/vendor/github.com/hashicorp/terraform/helper/resource/map.go deleted file mode 100644 index a465136f778..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/map.go +++ /dev/null @@ -1,140 +0,0 @@ -package resource - -import ( - "fmt" - "sort" - - "github.com/hashicorp/terraform/terraform" -) - -// Map is a map of resources that are supported, and provides helpers for -// more easily implementing a ResourceProvider. -type Map struct { - Mapping map[string]Resource -} - -func (m *Map) Validate( - t string, c *terraform.ResourceConfig) ([]string, []error) { - r, ok := m.Mapping[t] - if !ok { - return nil, []error{fmt.Errorf("Unknown resource type: %s", t)} - } - - // If there is no validator set, then it is valid - if r.ConfigValidator == nil { - return nil, nil - } - - return r.ConfigValidator.Validate(c) -} - -// Apply performs a create or update depending on the diff, and calls -// the proper function on the matching Resource. -func (m *Map) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - r, ok := m.Mapping[info.Type] - if !ok { - return nil, fmt.Errorf("Unknown resource type: %s", info.Type) - } - - if d.Destroy || d.RequiresNew() { - if s.ID != "" { - // Destroy the resource if it is created - err := r.Destroy(s, meta) - if err != nil { - return s, err - } - - s.ID = "" - } - - // If we're only destroying, and not creating, then return now. - // Otherwise, we continue so that we can create a new resource. - if !d.RequiresNew() { - return nil, nil - } - } - - var result *terraform.InstanceState - var err error - if s.ID == "" { - result, err = r.Create(s, d, meta) - } else { - if r.Update == nil { - return s, fmt.Errorf( - "Resource type '%s' doesn't support update", - info.Type) - } - - result, err = r.Update(s, d, meta) - } - if result != nil { - if result.Attributes == nil { - result.Attributes = make(map[string]string) - } - - result.Attributes["id"] = result.ID - } - - return result, err -} - -// Diff performs a diff on the proper resource type. 
-func (m *Map) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - r, ok := m.Mapping[info.Type] - if !ok { - return nil, fmt.Errorf("Unknown resource type: %s", info.Type) - } - - return r.Diff(s, c, meta) -} - -// Refresh performs a Refresh on the proper resource type. -// -// Refresh on the Resource won't be called if the state represents a -// non-created resource (ID is blank). -// -// An error is returned if the resource isn't registered. -func (m *Map) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - // If the resource isn't created, don't refresh. - if s.ID == "" { - return s, nil - } - - r, ok := m.Mapping[info.Type] - if !ok { - return nil, fmt.Errorf("Unknown resource type: %s", info.Type) - } - - return r.Refresh(s, meta) -} - -// Resources returns all the resources that are supported by this -// resource map and can be used to satisfy the Resources method of -// a ResourceProvider. -func (m *Map) Resources() []terraform.ResourceType { - ks := make([]string, 0, len(m.Mapping)) - for k, _ := range m.Mapping { - ks = append(ks, k) - } - sort.Strings(ks) - - rs := make([]terraform.ResourceType, 0, len(m.Mapping)) - for _, k := range ks { - rs = append(rs, terraform.ResourceType{ - Name: k, - }) - } - - return rs -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/resource.go b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go deleted file mode 100644 index 0d9c831a651..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/resource.go +++ /dev/null @@ -1,49 +0,0 @@ -package resource - -import ( - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/terraform" -) - -type Resource struct { - ConfigValidator *config.Validator - Create CreateFunc - Destroy DestroyFunc - Diff DiffFunc - Refresh RefreshFunc - Update UpdateFunc -} - -// CreateFunc is a function that creates a resource that didn't previously -// exist. -type CreateFunc func( - *terraform.InstanceState, - *terraform.InstanceDiff, - interface{}) (*terraform.InstanceState, error) - -// DestroyFunc is a function that destroys a resource that previously -// exists using the state. -type DestroyFunc func( - *terraform.InstanceState, - interface{}) error - -// DiffFunc is a function that performs a diff of a resource. -type DiffFunc func( - *terraform.InstanceState, - *terraform.ResourceConfig, - interface{}) (*terraform.InstanceDiff, error) - -// RefreshFunc is a function that performs a refresh of a specific type -// of resource. -type RefreshFunc func( - *terraform.InstanceState, - interface{}) (*terraform.InstanceState, error) - -// UpdateFunc is a function that is called to update a resource that -// previously existed. The difference between this and CreateFunc is that -// the diff is guaranteed to only contain attributes that don't require -// a new resource. 
-type UpdateFunc func( - *terraform.InstanceState, - *terraform.InstanceDiff, - interface{}) (*terraform.InstanceState, error) diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go deleted file mode 100644 index 88a839664c1..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/state.go +++ /dev/null @@ -1,259 +0,0 @@ -package resource - -import ( - "log" - "time" -) - -var refreshGracePeriod = 30 * time.Second - -// StateRefreshFunc is a function type used for StateChangeConf that is -// responsible for refreshing the item being watched for a state change. -// -// It returns three results. `result` is any object that will be returned -// as the final object after waiting for state change. This allows you to -// return the final updated object, for example an EC2 instance after refreshing -// it. -// -// `state` is the latest state of that object. And `err` is any error that -// may have happened while refreshing the state. -type StateRefreshFunc func() (result interface{}, state string, err error) - -// StateChangeConf is the configuration struct used for `WaitForState`. -type StateChangeConf struct { - Delay time.Duration // Wait this time before starting checks - Pending []string // States that are "allowed" and will continue trying - Refresh StateRefreshFunc // Refreshes the current state - Target []string // Target state - Timeout time.Duration // The amount of time to wait before timeout - MinTimeout time.Duration // Smallest time to wait before refreshes - PollInterval time.Duration // Override MinTimeout/backoff and only poll this often - NotFoundChecks int // Number of times to allow not found - - // This is to work around inconsistent APIs - ContinuousTargetOccurence int // Number of times the Target state has to occur continuously -} - -// WaitForState watches an object and waits for it to achieve the state -// specified in the configuration using the specified Refresh() func, -// waiting the number of seconds specified in the timeout configuration. -// -// If the Refresh function returns an error, exit immediately with that error. -// -// If the Refresh function returns a state other than the Target state or one -// listed in Pending, return immediately with an error. -// -// If the Timeout is exceeded before reaching the Target state, return an -// error. -// -// Otherwise, the result is the result of the first call to the Refresh function to -// reach the target state. -func (conf *StateChangeConf) WaitForState() (interface{}, error) { - log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) - - notfoundTick := 0 - targetOccurence := 0 - - // Set a default for times to check for not found - if conf.NotFoundChecks == 0 { - conf.NotFoundChecks = 20 - } - - if conf.ContinuousTargetOccurence == 0 { - conf.ContinuousTargetOccurence = 1 - } - - type Result struct { - Result interface{} - State string - Error error - Done bool - } - - // Read every result from the refresh loop, waiting for a positive result.Done. 
- resCh := make(chan Result, 1) - // cancellation channel for the refresh loop - cancelCh := make(chan struct{}) - - result := Result{} - - go func() { - defer close(resCh) - - time.Sleep(conf.Delay) - - // start with 0 delay for the first loop - var wait time.Duration - - for { - // store the last result - resCh <- result - - // wait and watch for cancellation - select { - case <-cancelCh: - return - case <-time.After(wait): - // first round had no wait - if wait == 0 { - wait = 100 * time.Millisecond - } - } - - res, currentState, err := conf.Refresh() - result = Result{ - Result: res, - State: currentState, - Error: err, - } - - if err != nil { - resCh <- result - return - } - - // If we're waiting for the absence of a thing, then return - if res == nil && len(conf.Target) == 0 { - targetOccurence++ - if conf.ContinuousTargetOccurence == targetOccurence { - result.Done = true - resCh <- result - return - } - continue - } - - if res == nil { - // If we didn't find the resource, check if we have been - // not finding it for awhile, and if so, report an error. - notfoundTick++ - if notfoundTick > conf.NotFoundChecks { - result.Error = &NotFoundError{ - LastError: err, - Retries: notfoundTick, - } - resCh <- result - return - } - } else { - // Reset the counter for when a resource isn't found - notfoundTick = 0 - found := false - - for _, allowed := range conf.Target { - if currentState == allowed { - found = true - targetOccurence++ - if conf.ContinuousTargetOccurence == targetOccurence { - result.Done = true - resCh <- result - return - } - continue - } - } - - for _, allowed := range conf.Pending { - if currentState == allowed { - found = true - targetOccurence = 0 - break - } - } - - if !found && len(conf.Pending) > 0 { - result.Error = &UnexpectedStateError{ - LastError: err, - State: result.State, - ExpectedState: conf.Target, - } - resCh <- result - return - } - } - - // Wait between refreshes using exponential backoff, except when - // waiting for the target state to reoccur. - if targetOccurence == 0 { - wait *= 2 - } - - // If a poll interval has been specified, choose that interval. - // Otherwise bound the default value. - if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { - wait = conf.PollInterval - } else { - if wait < conf.MinTimeout { - wait = conf.MinTimeout - } else if wait > 10*time.Second { - wait = 10 * time.Second - } - } - - log.Printf("[TRACE] Waiting %s before next try", wait) - } - }() - - // store the last value result from the refresh loop - lastResult := Result{} - - timeout := time.After(conf.Timeout) - for { - select { - case r, ok := <-resCh: - // channel closed, so return the last result - if !ok { - return lastResult.Result, lastResult.Error - } - - // we reached the intended state - if r.Done { - return r.Result, r.Error - } - - // still waiting, store the last result - lastResult = r - - case <-timeout: - log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) - log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) - - // cancel the goroutine and start our grace period timer - close(cancelCh) - timeout := time.After(refreshGracePeriod) - - // we need a for loop and a label to break on, because we may have - // an extra response value to read, but still want to wait for the - // channel to close. 
- forSelect: - for { - select { - case r, ok := <-resCh: - if r.Done { - // the last refresh loop reached the desired state - return r.Result, r.Error - } - - if !ok { - // the goroutine returned - break forSelect - } - - // target state not reached, save the result for the - // TimeoutError and wait for the channel to close - lastResult = r - case <-timeout: - log.Println("[ERROR] WaitForState exceeded refresh grace period") - break forSelect - } - } - - return nil, &TimeoutError{ - LastError: lastResult.Error, - LastState: lastResult.State, - Timeout: conf.Timeout, - ExpectedState: conf.Target, - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go deleted file mode 100644 index 257109d3b6c..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go +++ /dev/null @@ -1,188 +0,0 @@ -package resource - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/terraform" -) - -// shimState takes a new *states.State and reverts it to a legacy state for the provider ACC tests -func shimNewState(newState *states.State, providers map[string]terraform.ResourceProvider) (*terraform.State, error) { - state := terraform.NewState() - - // in the odd case of a nil state, let the helper packages handle it - if newState == nil { - return nil, nil - } - - for _, newMod := range newState.Modules { - mod := state.AddModule(newMod.Addr) - - for name, out := range newMod.OutputValues { - outputType := "" - val := hcl2shim.ConfigValueFromHCL2(out.Value) - ty := out.Value.Type() - switch { - case ty == cty.String: - outputType = "string" - case ty.IsTupleType() || ty.IsListType(): - outputType = "list" - case ty.IsMapType(): - outputType = "map" - } - - mod.Outputs[name] = &terraform.OutputState{ - Type: outputType, - Value: val, - Sensitive: out.Sensitive, - } - } - - for _, res := range newMod.Resources { - resType := res.Addr.Type - providerType := res.ProviderConfig.ProviderConfig.Type - - resource := getResource(providers, providerType.LegacyString(), res.Addr) - - for key, i := range res.Instances { - resState := &terraform.ResourceState{ - Type: resType, - Provider: res.ProviderConfig.String(), - } - - // We should always have a Current instance here, but be safe about checking. 
- if i.Current != nil { - flatmap, err := shimmedAttributes(i.Current, resource) - if err != nil { - return nil, fmt.Errorf("error decoding state for %q: %s", resType, err) - } - - var meta map[string]interface{} - if i.Current.Private != nil { - err := json.Unmarshal(i.Current.Private, &meta) - if err != nil { - return nil, err - } - } - - resState.Primary = &terraform.InstanceState{ - ID: flatmap["id"], - Attributes: flatmap, - Tainted: i.Current.Status == states.ObjectTainted, - Meta: meta, - } - - if i.Current.SchemaVersion != 0 { - if resState.Primary.Meta == nil { - resState.Primary.Meta = map[string]interface{}{} - } - resState.Primary.Meta["schema_version"] = i.Current.SchemaVersion - } - - for _, dep := range i.Current.DependsOn { - resState.Dependencies = append(resState.Dependencies, dep.String()) - } - - // convert the indexes to the old style flapmap indexes - idx := "" - switch key.(type) { - case addrs.IntKey: - // don't add numeric index values to resources with a count of 0 - if len(res.Instances) > 1 { - idx = fmt.Sprintf(".%d", key) - } - case addrs.StringKey: - idx = "." + key.String() - } - - mod.Resources[res.Addr.String()+idx] = resState - } - - // add any deposed instances - for _, dep := range i.Deposed { - flatmap, err := shimmedAttributes(dep, resource) - if err != nil { - return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err) - } - - var meta map[string]interface{} - if dep.Private != nil { - err := json.Unmarshal(dep.Private, &meta) - if err != nil { - return nil, err - } - } - - deposed := &terraform.InstanceState{ - ID: flatmap["id"], - Attributes: flatmap, - Tainted: dep.Status == states.ObjectTainted, - Meta: meta, - } - if dep.SchemaVersion != 0 { - deposed.Meta = map[string]interface{}{ - "schema_version": dep.SchemaVersion, - } - } - - resState.Deposed = append(resState.Deposed, deposed) - } - } - } - } - - return state, nil -} - -func getResource(providers map[string]terraform.ResourceProvider, providerName string, addr addrs.Resource) *schema.Resource { - p := providers[providerName] - if p == nil { - panic(fmt.Sprintf("provider %q not found in test step", providerName)) - } - - // this is only for tests, so should only see schema.Providers - provider := p.(*schema.Provider) - - switch addr.Mode { - case addrs.ManagedResourceMode: - resource := provider.ResourcesMap[addr.Type] - if resource != nil { - return resource - } - case addrs.DataResourceMode: - resource := provider.DataSourcesMap[addr.Type] - if resource != nil { - return resource - } - } - - panic(fmt.Sprintf("resource %s not found in test step", addr.Type)) -} - -func shimmedAttributes(instance *states.ResourceInstanceObjectSrc, res *schema.Resource) (map[string]string, error) { - flatmap := instance.AttrsFlat - if flatmap != nil { - return flatmap, nil - } - - // if we have json attrs, they need to be decoded - rio, err := instance.Decode(res.CoreConfigSchema().ImpliedType()) - if err != nil { - return nil, err - } - - instanceState, err := res.ShimInstanceStateFromValue(rio.Value) - if err != nil { - return nil, err - } - - return instanceState.Attributes, nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go deleted file mode 100644 index 3153d8447d4..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go +++ /dev/null @@ -1,1320 +0,0 @@ -package resource - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "os" - 
"path/filepath" - "reflect" - "regexp" - "strings" - "syscall" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-multierror" - "github.com/mitchellh/colorstring" - - "github.com/hashicorp/terraform-plugin-sdk/tfdiags" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/command/format" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configload" - "github.com/hashicorp/terraform/helper/logging" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/terraform" -) - -// flagSweep is a flag available when running tests on the command line. It -// contains a comma seperated list of regions to for the sweeper functions to -// run in. This flag bypasses the normal Test path and instead runs functions designed to -// clean up any leaked resources a testing environment could have created. It is -// a best effort attempt, and relies on Provider authors to implement "Sweeper" -// methods for resources. - -// Adding Sweeper methods with AddTestSweepers will -// construct a list of sweeper funcs to be called here. We iterate through -// regions provided by the sweep flag, and for each region we iterate through the -// tests, and exit on any errors. At time of writing, sweepers are ran -// sequentially, however they can list dependencies to be ran first. We track -// the sweepers that have been ran, so as to not run a sweeper twice for a given -// region. -// -// WARNING: -// Sweepers are designed to be destructive. You should not use the -sweep flag -// in any environment that is not strictly a test environment. Resources will be -// destroyed. - -var flagSweep = flag.String("sweep", "", "List of Regions to run available Sweepers") -var flagSweepRun = flag.String("sweep-run", "", "Comma seperated list of Sweeper Tests to run") -var sweeperFuncs map[string]*Sweeper - -// map of sweepers that have ran, and the success/fail status based on any error -// raised -var sweeperRunList map[string]bool - -// type SweeperFunc is a signature for a function that acts as a sweeper. It -// accepts a string for the region that the sweeper is to be ran in. This -// function must be able to construct a valid client for that region. -type SweeperFunc func(r string) error - -type Sweeper struct { - // Name for sweeper. Must be unique to be ran by the Sweeper Runner - Name string - - // Dependencies list the const names of other Sweeper functions that must be ran - // prior to running this Sweeper. This is an ordered list that will be invoked - // recursively at the helper/resource level - Dependencies []string - - // Sweeper function that when invoked sweeps the Provider of specific - // resources - F SweeperFunc -} - -func init() { - sweeperFuncs = make(map[string]*Sweeper) -} - -// AddTestSweepers function adds a given name and Sweeper configuration -// pair to the internal sweeperFuncs map. Invoke this function to register a -// resource sweeper to be available for running when the -sweep flag is used -// with `go test`. Sweeper names must be unique to help ensure a given sweeper -// is only ran once per run. 
-func AddTestSweepers(name string, s *Sweeper) { - if _, ok := sweeperFuncs[name]; ok { - log.Fatalf("[ERR] Error adding (%s) to sweeperFuncs: function already exists in map", name) - } - - sweeperFuncs[name] = s -} - -func TestMain(m *testing.M) { - flag.Parse() - if *flagSweep != "" { - // parse flagSweep contents for regions to run - regions := strings.Split(*flagSweep, ",") - - // get filtered list of sweepers to run based on sweep-run flag - sweepers := filterSweepers(*flagSweepRun, sweeperFuncs) - for _, region := range regions { - region = strings.TrimSpace(region) - // reset sweeperRunList for each region - sweeperRunList = map[string]bool{} - - log.Printf("[DEBUG] Running Sweepers for region (%s):\n", region) - for _, sweeper := range sweepers { - if err := runSweeperWithRegion(region, sweeper); err != nil { - log.Fatalf("[ERR] error running (%s): %s", sweeper.Name, err) - } - } - - log.Printf("Sweeper Tests ran:\n") - for s, _ := range sweeperRunList { - fmt.Printf("\t- %s\n", s) - } - } - } else { - os.Exit(m.Run()) - } -} - -// filterSweepers takes a comma seperated string listing the names of sweepers -// to be ran, and returns a filtered set from the list of all of sweepers to -// run based on the names given. -func filterSweepers(f string, source map[string]*Sweeper) map[string]*Sweeper { - filterSlice := strings.Split(strings.ToLower(f), ",") - if len(filterSlice) == 1 && filterSlice[0] == "" { - // if the filter slice is a single element of "" then no sweeper list was - // given, so just return the full list - return source - } - - sweepers := make(map[string]*Sweeper) - for name, sweeper := range source { - for _, s := range filterSlice { - if strings.Contains(strings.ToLower(name), s) { - sweepers[name] = sweeper - } - } - } - return sweepers -} - -// runSweeperWithRegion recieves a sweeper and a region, and recursively calls -// itself with that region for every dependency found for that sweeper. If there -// are no dependencies, invoke the contained sweeper fun with the region, and -// add the success/fail status to the sweeperRunList. -func runSweeperWithRegion(region string, s *Sweeper) error { - for _, dep := range s.Dependencies { - if depSweeper, ok := sweeperFuncs[dep]; ok { - log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running..", s.Name, dep) - if err := runSweeperWithRegion(region, depSweeper); err != nil { - return err - } - } else { - log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) - } - } - - if _, ok := sweeperRunList[s.Name]; ok { - log.Printf("[DEBUG] Sweeper (%s) already ran in region (%s)", s.Name, region) - return nil - } - - runE := s.F(region) - if runE == nil { - sweeperRunList[s.Name] = true - } else { - sweeperRunList[s.Name] = false - } - - return runE -} - -const TestEnvVar = "TF_ACC" - -// TestProvider can be implemented by any ResourceProvider to provide custom -// reset functionality at the start of an acceptance test. -// The helper/schema Provider implements this interface. -type TestProvider interface { - TestReset() error -} - -// TestCheckFunc is the callback type used with acceptance tests to check -// the state of a resource. The state passed in is the latest state known, -// or in the case of being after a destroy, it is the last known state when -// it was created. 
-type TestCheckFunc func(*terraform.State) error - -// ImportStateCheckFunc is the check function for ImportState tests -type ImportStateCheckFunc func([]*terraform.InstanceState) error - -// ImportStateIdFunc is an ID generation function to help with complex ID -// generation for ImportState tests. -type ImportStateIdFunc func(*terraform.State) (string, error) - -// TestCase is a single acceptance test case used to test the apply/destroy -// lifecycle of a resource in a specific configuration. -// -// When the destroy plan is executed, the config from the last TestStep -// is used to plan it. -type TestCase struct { - // IsUnitTest allows a test to run regardless of the TF_ACC - // environment variable. This should be used with care - only for - // fast tests on local resources (e.g. remote state with a local - // backend) but can be used to increase confidence in correct - // operation of Terraform without waiting for a full acctest run. - IsUnitTest bool - - // PreCheck, if non-nil, will be called before any test steps are - // executed. It will only be executed in the case that the steps - // would run, so it can be used for some validation before running - // acceptance tests, such as verifying that keys are setup. - PreCheck func() - - // Providers is the ResourceProvider that will be under test. - // - // Alternately, ProviderFactories can be specified for the providers - // that are valid. This takes priority over Providers. - // - // The end effect of each is the same: specifying the providers that - // are used within the tests. - Providers map[string]terraform.ResourceProvider - ProviderFactories map[string]terraform.ResourceProviderFactory - - // PreventPostDestroyRefresh can be set to true for cases where data sources - // are tested alongside real resources - PreventPostDestroyRefresh bool - - // CheckDestroy is called after the resource is finally destroyed - // to allow the tester to test that the resource is truly gone. - CheckDestroy TestCheckFunc - - // Steps are the apply sequences done within the context of the - // same state. Each step can have its own check to verify correctness. - Steps []TestStep - - // The settings below control the "ID-only refresh test." This is - // an enabled-by-default test that tests that a refresh can be - // refreshed with only an ID to result in the same attributes. - // This validates completeness of Refresh. - // - // IDRefreshName is the name of the resource to check. This will - // default to the first non-nil primary resource in the state. - // - // IDRefreshIgnore is a list of configuration keys that will be ignored. - IDRefreshName string - IDRefreshIgnore []string -} - -// TestStep is a single apply sequence of a test, done within the -// context of a state. -// -// Multiple TestSteps can be sequenced in a Test to allow testing -// potentially complex update logic. In general, simply create/destroy -// tests will only need one step. -type TestStep struct { - // ResourceName should be set to the name of the resource - // that is being tested. Example: "aws_instance.foo". Various test - // modes use this to auto-detect state information. - // - // This is only required if the test mode settings below say it is - // for the mode you're using. - ResourceName string - - // PreConfig is called before the Config is applied to perform any per-step - // setup that needs to happen. This is called regardless of "test mode" - // below. 
- PreConfig func() - - // Taint is a list of resource addresses to taint prior to the execution of - // the step. Be sure to only include this at a step where the referenced - // address will be present in state, as it will fail the test if the resource - // is missing. - // - // This option is ignored on ImportState tests, and currently only works for - // resources in the root module path. - Taint []string - - //--------------------------------------------------------------- - // Test modes. One of the following groups of settings must be - // set to determine what the test step will do. Ideally we would've - // used Go interfaces here but there are now hundreds of tests we don't - // want to re-type so instead we just determine which step logic - // to run based on what settings below are set. - //--------------------------------------------------------------- - - //--------------------------------------------------------------- - // Plan, Apply testing - //--------------------------------------------------------------- - - // Config a string of the configuration to give to Terraform. If this - // is set, then the TestCase will execute this step with the same logic - // as a `terraform apply`. - Config string - - // Check is called after the Config is applied. Use this step to - // make your own API calls to check the status of things, and to - // inspect the format of the ResourceState itself. - // - // If an error is returned, the test will fail. In this case, a - // destroy plan will still be attempted. - // - // If this is nil, no check is done on this step. - Check TestCheckFunc - - // Destroy will create a destroy plan if set to true. - Destroy bool - - // ExpectNonEmptyPlan can be set to true for specific types of tests that are - // looking to verify that a diff occurs - ExpectNonEmptyPlan bool - - // ExpectError allows the construction of test cases that we expect to fail - // with an error. The specified regexp must match against the error for the - // test to pass. - ExpectError *regexp.Regexp - - // PlanOnly can be set to only run `plan` with this configuration, and not - // actually apply it. This is useful for ensuring config changes result in - // no-op plans - PlanOnly bool - - // PreventDiskCleanup can be set to true for testing terraform modules which - // require access to disk at runtime. Note that this will leave files in the - // temp folder - PreventDiskCleanup bool - - // PreventPostDestroyRefresh can be set to true for cases where data sources - // are tested alongside real resources - PreventPostDestroyRefresh bool - - // SkipFunc is called before applying config, but after PreConfig - // This is useful for defining test steps with platform-dependent checks - SkipFunc func() (bool, error) - - //--------------------------------------------------------------- - // ImportState testing - //--------------------------------------------------------------- - - // ImportState, if true, will test the functionality of ImportState - // by importing the resource with ResourceName (must be set) and the - // ID of that resource. - ImportState bool - - // ImportStateId is the ID to perform an ImportState operation with. - // This is optional. If it isn't set, then the resource ID is automatically - // determined by inspecting the state for ResourceName's ID. - ImportStateId string - - // ImportStateIdPrefix is the prefix added in front of ImportStateId. - // This can be useful in complex import cases, where more than one - // attribute needs to be passed on as the Import ID. 
Mainly in cases - // where the ID is not known, and a known prefix needs to be added to - // the unset ImportStateId field. - ImportStateIdPrefix string - - // ImportStateIdFunc is a function that can be used to dynamically generate - // the ID for the ImportState tests. It is sent the state, which can be - // checked to derive the attributes necessary and generate the string in the - // desired format. - ImportStateIdFunc ImportStateIdFunc - - // ImportStateCheck checks the results of ImportState. It should be - // used to verify that the resulting value of ImportState has the - // proper resources, IDs, and attributes. - ImportStateCheck ImportStateCheckFunc - - // ImportStateVerify, if true, will also check that the state values - // that are finally put into the state after import match for all the - // IDs returned by the Import. Note that this checks for strict equality - // and does not respect DiffSuppressFunc or CustomizeDiff. - // - // ImportStateVerifyIgnore is a list of prefixes of fields that should - // not be verified to be equal. These can be set to ephemeral fields or - // fields that can't be refreshed and don't matter. - ImportStateVerify bool - ImportStateVerifyIgnore []string - - // provider s is used internally to maintain a reference to the - // underlying providers during the tests - providers map[string]terraform.ResourceProvider -} - -// Set to a file mask in sprintf format where %s is test name -const EnvLogPathMask = "TF_LOG_PATH_MASK" - -func LogOutput(t TestT) (logOutput io.Writer, err error) { - logOutput = ioutil.Discard - - logLevel := logging.CurrentLogLevel() - if logLevel == "" { - return - } - - logOutput = os.Stderr - - if logPath := os.Getenv(logging.EnvLogFile); logPath != "" { - var err error - logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) - if err != nil { - return nil, err - } - } - - if logPathMask := os.Getenv(EnvLogPathMask); logPathMask != "" { - // Escape special characters which may appear if we have subtests - testName := strings.Replace(t.Name(), "/", "__", -1) - - logPath := fmt.Sprintf(logPathMask, testName) - var err error - logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) - if err != nil { - return nil, err - } - } - - // This was the default since the beginning - logOutput = &logging.LevelFilter{ - Levels: logging.ValidLevels, - MinLevel: logging.LogLevel(logLevel), - Writer: logOutput, - } - - return -} - -// ParallelTest performs an acceptance test on a resource, allowing concurrency -// with other ParallelTest. -// -// Tests will fail if they do not properly handle conditions to allow multiple -// tests to occur against the same resource or service (e.g. random naming). -// All other requirements of the Test function also apply to this function. -func ParallelTest(t TestT, c TestCase) { - t.Parallel() - Test(t, c) -} - -// Test performs an acceptance test on a resource. -// -// Tests are not run unless an environmental variable "TF_ACC" is -// set to some non-empty value. This is to avoid test cases surprising -// a user by creating real resources. -// -// Tests will fail unless the verbose flag (`go test -v`, or explicitly -// the "-test.v" flag) is set. Because some acceptance tests take quite -// long, we require the verbose flag so users are able to see progress -// output. -func Test(t TestT, c TestCase) { - // We only run acceptance tests if an env var is set because they're - // slow and generally require some outside configuration. 
You can opt out - // of this with OverrideEnvVar on individual TestCases. - if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest { - t.Skip(fmt.Sprintf( - "Acceptance tests skipped unless env '%s' set", - TestEnvVar)) - return - } - - logWriter, err := LogOutput(t) - if err != nil { - t.Error(fmt.Errorf("error setting up logging: %s", err)) - } - log.SetOutput(logWriter) - - // We require verbose mode so that the user knows what is going on. - if !testTesting && !testing.Verbose() && !c.IsUnitTest { - t.Fatal("Acceptance tests must be run with the -v flag on tests") - return - } - - // Run the PreCheck if we have it - if c.PreCheck != nil { - c.PreCheck() - } - - // get instances of all providers, so we can use the individual - // resources to shim the state during the tests. - providers := make(map[string]terraform.ResourceProvider) - for name, pf := range testProviderFactories(c) { - p, err := pf() - if err != nil { - t.Fatal(err) - } - providers[name] = p - } - - providerResolver, err := testProviderResolver(c) - if err != nil { - t.Fatal(err) - } - - opts := terraform.ContextOpts{ProviderResolver: providerResolver} - - // A single state variable to track the lifecycle, starting with no state - var state *terraform.State - - // Go through each step and run it - var idRefreshCheck *terraform.ResourceState - idRefresh := c.IDRefreshName != "" - errored := false - for i, step := range c.Steps { - // insert the providers into the step so we can get the resources for - // shimming the state - step.providers = providers - - var err error - log.Printf("[DEBUG] Test: Executing step %d", i) - - if step.SkipFunc != nil { - skip, err := step.SkipFunc() - if err != nil { - t.Fatal(err) - } - if skip { - log.Printf("[WARN] Skipping step %d", i) - continue - } - } - - if step.Config == "" && !step.ImportState { - err = fmt.Errorf( - "unknown test mode for step. Please see TestStep docs\n\n%#v", - step) - } else { - if step.ImportState { - if step.Config == "" { - step.Config = testProviderConfig(c) - } - - // Can optionally set step.Config in addition to - // step.ImportState, to provide config for the import. - state, err = testStepImportState(opts, state, step) - } else { - state, err = testStepConfig(opts, state, step) - } - } - - // If we expected an error, but did not get one, fail - if err == nil && step.ExpectError != nil { - errored = true - t.Error(fmt.Sprintf( - "Step %d, no error received, but expected a match to:\n\n%s\n\n", - i, step.ExpectError)) - break - } - - // If there was an error, exit - if err != nil { - // Perhaps we expected an error? Check if it matches - if step.ExpectError != nil { - if !step.ExpectError.MatchString(err.Error()) { - errored = true - t.Error(fmt.Sprintf( - "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n", - i, err, step.ExpectError)) - break - } - } else { - errored = true - t.Error(fmt.Sprintf("Step %d error: %s", i, detailedErrorMessage(err))) - break - } - } - - // If we've never checked an id-only refresh and our state isn't - // empty, find the first resource and test it. - if idRefresh && idRefreshCheck == nil && !state.Empty() { - // Find the first non-nil resource in the state - for _, m := range state.Modules { - if len(m.Resources) > 0 { - if v, ok := m.Resources[c.IDRefreshName]; ok { - idRefreshCheck = v - } - - break - } - } - - // If we have an instance to check for refreshes, do it - // immediately. 
We do it in the middle of another test - // because it shouldn't affect the overall state (refresh - // is read-only semantically) and we want to fail early if - // this fails. If refresh isn't read-only, then this will have - // caught a different bug. - if idRefreshCheck != nil { - log.Printf( - "[WARN] Test: Running ID-only refresh check on %s", - idRefreshCheck.Primary.ID) - if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil { - log.Printf("[ERROR] Test: ID-only test failed: %s", err) - t.Error(fmt.Sprintf( - "[ERROR] Test: ID-only test failed: %s", err)) - break - } - } - } - } - - // If we never checked an id-only refresh, it is a failure. - if idRefresh { - if !errored && len(c.Steps) > 0 && idRefreshCheck == nil { - t.Error("ID-only refresh check never ran.") - } - } - - // If we have a state, then run the destroy - if state != nil { - lastStep := c.Steps[len(c.Steps)-1] - destroyStep := TestStep{ - Config: lastStep.Config, - Check: c.CheckDestroy, - Destroy: true, - PreventDiskCleanup: lastStep.PreventDiskCleanup, - PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, - providers: providers, - } - - log.Printf("[WARN] Test: Executing destroy step") - state, err := testStep(opts, state, destroyStep) - if err != nil { - t.Error(fmt.Sprintf( - "Error destroying resource! WARNING: Dangling resources\n"+ - "may exist. The full state and error is shown below.\n\n"+ - "Error: %s\n\nState: %s", - err, - state)) - } - } else { - log.Printf("[WARN] Skipping destroy test since there is no state.") - } -} - -// testProviderConfig takes the list of Providers in a TestCase and returns a -// config with only empty provider blocks. This is useful for Import, where no -// config is provided, but the providers must be defined. -func testProviderConfig(c TestCase) string { - var lines []string - for p := range c.Providers { - lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) - } - - return strings.Join(lines, "") -} - -// testProviderFactories combines the fixed Providers and -// ResourceProviderFactory functions into a single map of -// ResourceProviderFactory functions. -func testProviderFactories(c TestCase) map[string]terraform.ResourceProviderFactory { - ctxProviders := make(map[string]terraform.ResourceProviderFactory) - for k, pf := range c.ProviderFactories { - ctxProviders[k] = pf - } - - // add any fixed providers - for k, p := range c.Providers { - ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p) - } - return ctxProviders -} - -// testProviderResolver is a helper to build a ResourceProviderResolver -// with pre instantiated ResourceProviders, so that we can reset them for the -// test, while only calling the factory function once. -// Any errors are stored so that they can be returned by the factory in -// terraform to match non-test behavior. -func testProviderResolver(c TestCase) (providers.Resolver, error) { - ctxProviders := testProviderFactories(c) - - // wrap the old provider factories in the test grpc server so they can be - // called from terraform. - newProviders := make(map[addrs.Provider]providers.Factory) - - for k, pf := range ctxProviders { - factory := pf // must copy to ensure each closure sees its own value - newProviders[addrs.NewLegacyProvider(k)] = func() (providers.Interface, error) { - p, err := factory() - if err != nil { - return nil, err - } - - // The provider is wrapped in a GRPCTestProvider so that it can be - // passed back to terraform core as a providers.Interface, rather - // than the legacy ResourceProvider. 
- return GRPCTestProvider(p), nil - } - } - - return providers.ResolverFixed(newProviders), nil -} - -// UnitTest is a helper to force the acceptance testing harness to run in the -// normal unit test suite. This should only be used for resource that don't -// have any external dependencies. -func UnitTest(t TestT, c TestCase) { - c.IsUnitTest = true - Test(t, c) -} - -func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error { - // TODO: We guard by this right now so master doesn't explode. We - // need to remove this eventually to make this part of the normal tests. - if os.Getenv("TF_ACC_IDONLY") == "" { - return nil - } - - addr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: r.Type, - Name: "foo", - }.Instance(addrs.NoKey) - absAddr := addr.Absolute(addrs.RootModuleInstance) - - // Build the state. The state is just the resource with an ID. There - // are no attributes. We only set what is needed to perform a refresh. - state := states.NewState() - state.RootModule().SetResourceInstanceCurrent( - addr, - &states.ResourceInstanceObjectSrc{ - AttrsFlat: r.Primary.Attributes, - Status: states.ObjectReady, - }, - addrs.ProviderConfig{Type: addrs.NewLegacyProvider("placeholder")}.Absolute(addrs.RootModuleInstance), - ) - - // Create the config module. We use the full config because Refresh - // doesn't have access to it and we may need things like provider - // configurations. The initial implementation of id-only checks used - // an empty config module, but that caused the aforementioned problems. - cfg, err := testConfig(opts, step) - if err != nil { - return err - } - - // Initialize the context - opts.Config = cfg - opts.State = state - ctx, ctxDiags := terraform.NewContext(&opts) - if ctxDiags.HasErrors() { - return ctxDiags.Err() - } - if diags := ctx.Validate(); len(diags) > 0 { - if diags.HasErrors() { - return errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) - } - - log.Printf("[WARN] Config warnings:\n%s", diags.Err().Error()) - } - - // Refresh! - state, refreshDiags := ctx.Refresh() - if refreshDiags.HasErrors() { - return refreshDiags.Err() - } - - // Verify attribute equivalence. - actualR := state.ResourceInstance(absAddr) - if actualR == nil { - return fmt.Errorf("Resource gone!") - } - if actualR.Current == nil { - return fmt.Errorf("Resource has no primary instance") - } - actual := actualR.Current.AttrsFlat - expected := r.Primary.Attributes - // Remove fields we're ignoring - for _, v := range c.IDRefreshIgnore { - for k, _ := range actual { - if strings.HasPrefix(k, v) { - delete(actual, k) - } - } - for k, _ := range expected { - if strings.HasPrefix(k, v) { - delete(expected, k) - } - } - } - - if !reflect.DeepEqual(actual, expected) { - // Determine only the different attributes - for k, v := range expected { - if av, ok := actual[k]; ok && v == av { - delete(expected, k) - delete(actual, k) - } - } - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - return fmt.Errorf( - "Attributes not equivalent. Difference is shown below. 
Top is actual, bottom is expected."+ - "\n\n%s\n\n%s", - spewConf.Sdump(actual), spewConf.Sdump(expected)) - } - - return nil -} - -func testConfig(opts terraform.ContextOpts, step TestStep) (*configs.Config, error) { - if step.PreConfig != nil { - step.PreConfig() - } - - cfgPath, err := ioutil.TempDir("", "tf-test") - if err != nil { - return nil, fmt.Errorf("Error creating temporary directory for config: %s", err) - } - - if step.PreventDiskCleanup { - log.Printf("[INFO] Skipping defer os.RemoveAll call") - } else { - defer os.RemoveAll(cfgPath) - } - - // Write the main configuration file - err = ioutil.WriteFile(filepath.Join(cfgPath, "main.tf"), []byte(step.Config), os.ModePerm) - if err != nil { - return nil, fmt.Errorf("Error creating temporary file for config: %s", err) - } - - // Create directory for our child modules, if any. - modulesDir := filepath.Join(cfgPath, ".modules") - err = os.Mkdir(modulesDir, os.ModePerm) - if err != nil { - return nil, fmt.Errorf("Error creating child modules directory: %s", err) - } - - inst := initwd.NewModuleInstaller(modulesDir, nil) - _, installDiags := inst.InstallModules(cfgPath, true, initwd.ModuleInstallHooksImpl{}) - if installDiags.HasErrors() { - return nil, installDiags.Err() - } - - loader, err := configload.NewLoader(&configload.Config{ - ModulesDir: modulesDir, - }) - if err != nil { - return nil, fmt.Errorf("failed to create config loader: %s", err) - } - - config, configDiags := loader.LoadConfig(cfgPath) - if configDiags.HasErrors() { - return nil, configDiags - } - - return config, nil -} - -func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { - if c.ResourceName == "" { - return nil, fmt.Errorf("ResourceName must be set in TestStep") - } - - for _, m := range state.Modules { - if len(m.Resources) > 0 { - if v, ok := m.Resources[c.ResourceName]; ok { - return v, nil - } - } - } - - return nil, fmt.Errorf( - "Resource specified by ResourceName couldn't be found: %s", c.ResourceName) -} - -// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into -// a single TestCheckFunc. -// -// As a user testing their provider, this lets you decompose your checks -// into smaller pieces more easily. -func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { - return func(s *terraform.State) error { - for i, f := range fs { - if err := f(s); err != nil { - return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err) - } - } - - return nil - } -} - -// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into -// a single TestCheckFunc. -// -// As a user testing their provider, this lets you decompose your checks -// into smaller pieces more easily. -// -// Unlike ComposeTestCheckFunc, ComposeAggergateTestCheckFunc runs _all_ of the -// TestCheckFuncs and aggregates failures. -func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { - return func(s *terraform.State) error { - var result *multierror.Error - - for i, f := range fs { - if err := f(s); err != nil { - result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)) - } - } - - return result.ErrorOrNil() - } -} - -// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value -// exists in state for the given name/key combination. It is useful when -// testing that computed values were set, when it is not possible to -// know ahead of time what the values will be. 
-func TestCheckResourceAttrSet(name, key string) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testCheckResourceAttrSet(is, name, key) - } -} - -// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with -// support for non-root modules -func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testCheckResourceAttrSet(is, name, key) - } -} - -func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error { - if val, ok := is.Attributes[key]; !ok || val == "" { - return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) - } - - return nil -} - -// TestCheckResourceAttr is a TestCheckFunc which validates -// the value in state for the given name/key combination. -func TestCheckResourceAttr(name, key, value string) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testCheckResourceAttr(is, name, key, value) - } -} - -// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with -// support for non-root modules -func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testCheckResourceAttr(is, name, key, value) - } -} - -func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { - // Empty containers may be elided from the state. - // If the intent here is to check for an empty container, allow the key to - // also be non-existent. - emptyCheck := false - if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { - emptyCheck = true - } - - if v, ok := is.Attributes[key]; !ok || v != value { - if emptyCheck && !ok { - return nil - } - - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", name, key) - } - - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - name, - key, - value, - v) - } - return nil -} - -// TestCheckNoResourceAttr is a TestCheckFunc which ensures that -// NO value exists in state for the given name/key combination. -func TestCheckNoResourceAttr(name, key string) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testCheckNoResourceAttr(is, name, key) - } -} - -// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with -// support for non-root modules -func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testCheckNoResourceAttr(is, name, key) - } -} - -func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { - // Empty containers may sometimes be included in the state. - // If the intent here is to check for an empty container, allow the value to - // also be "0". 
- emptyCheck := false - if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") { - emptyCheck = true - } - - val, exists := is.Attributes[key] - if emptyCheck && val == "0" { - return nil - } - - if exists { - return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) - } - - return nil -} - -// TestMatchResourceAttr is a TestCheckFunc which checks that the value -// in state for the given name/key combination matches the given regex. -func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testMatchResourceAttr(is, name, key, r) - } -} - -// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with -// support for non-root modules -func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testMatchResourceAttr(is, name, key, r) - } -} - -func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error { - if !r.MatchString(is.Attributes[key]) { - return fmt.Errorf( - "%s: Attribute '%s' didn't match %q, got %#v", - name, - key, - r.String(), - is.Attributes[key]) - } - - return nil -} - -// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the -// value is a pointer so that it can be updated while the test is running. -// It will only be dereferenced at the point this step is run. -func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc { - return func(s *terraform.State) error { - return TestCheckResourceAttr(name, key, *value)(s) - } -} - -// TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with -// support for non-root modules -func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc { - return func(s *terraform.State) error { - return TestCheckModuleResourceAttr(mp, name, key, *value)(s) - } -} - -// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values -// in state for a pair of name/key combinations are equal. 
-func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { - return func(s *terraform.State) error { - isFirst, err := primaryInstanceState(s, nameFirst) - if err != nil { - return err - } - - isSecond, err := primaryInstanceState(s, nameSecond) - if err != nil { - return err - } - - return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) - } -} - -// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with -// support for non-root modules -func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { - mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim() - mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim() - return func(s *terraform.State) error { - isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst) - if err != nil { - return err - } - - isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond) - if err != nil { - return err - } - - return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) - } -} - -func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { - vFirst, okFirst := isFirst.Attributes[keyFirst] - vSecond, okSecond := isSecond.Attributes[keySecond] - - // Container count values of 0 should not be relied upon, and not reliably - // maintained by helper/schema. For the purpose of tests, consider unset and - // 0 to be equal. - if len(keyFirst) > 2 && len(keySecond) > 2 && keyFirst[len(keyFirst)-2:] == keySecond[len(keySecond)-2:] && - (strings.HasSuffix(keyFirst, ".#") || strings.HasSuffix(keyFirst, ".%")) { - // they have the same suffix, and it is a collection count key. - if vFirst == "0" || vFirst == "" { - okFirst = false - } - if vSecond == "0" || vSecond == "" { - okSecond = false - } - } - - if okFirst != okSecond { - if !okFirst { - return fmt.Errorf("%s: Attribute %q not set, but %q is set in %s as %q", nameFirst, keyFirst, keySecond, nameSecond, vSecond) - } - return fmt.Errorf("%s: Attribute %q is %q, but %q is not set in %s", nameFirst, keyFirst, vFirst, keySecond, nameSecond) - } - if !(okFirst || okSecond) { - // If they both don't exist then they are equally unset, so that's okay. - return nil - } - - if vFirst != vSecond { - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - nameFirst, - keyFirst, - vSecond, - vFirst) - } - - return nil -} - -// TestCheckOutput checks an output in the Terraform configuration -func TestCheckOutput(name, value string) TestCheckFunc { - return func(s *terraform.State) error { - ms := s.RootModule() - rs, ok := ms.Outputs[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Value != value { - return fmt.Errorf( - "Output '%s': expected %#v, got %#v", - name, - value, - rs) - } - - return nil - } -} - -func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { - return func(s *terraform.State) error { - ms := s.RootModule() - rs, ok := ms.Outputs[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if !r.MatchString(rs.Value.(string)) { - return fmt.Errorf( - "Output '%s': %#v didn't match %q", - name, - rs, - r.String()) - } - - return nil - } -} - -// TestT is the interface used to handle the test lifecycle of a test. 
-// -// Users should just use a *testing.T object, which implements this. -type TestT interface { - Error(args ...interface{}) - Fatal(args ...interface{}) - Skip(args ...interface{}) - Name() string - Parallel() -} - -// This is set to true by unit tests to alter some behavior -var testTesting = false - -// modulePrimaryInstanceState returns the instance state for the given resource -// name in a ModuleState -func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { - rs, ok := ms.Resources[name] - if !ok { - return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) - } - - is := rs.Primary - if is == nil { - return nil, fmt.Errorf("No primary instance: %s in %s", name, ms.Path) - } - - return is, nil -} - -// modulePathPrimaryInstanceState returns the primary instance state for the -// given resource name in a given module path. -func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) { - ms := s.ModuleByPath(mp) - if ms == nil { - return nil, fmt.Errorf("No module found at: %s", mp) - } - - return modulePrimaryInstanceState(s, ms, name) -} - -// primaryInstanceState returns the primary instance state for the given -// resource name in the root module. -func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { - ms := s.RootModule() - return modulePrimaryInstanceState(s, ms, name) -} - -// operationError is a specialized implementation of error used to describe -// failures during one of the several operations performed for a particular -// test case. -type operationError struct { - OpName string - Diags tfdiags.Diagnostics -} - -func newOperationError(opName string, diags tfdiags.Diagnostics) error { - return operationError{opName, diags} -} - -// Error returns a terse error string containing just the basic diagnostic -// messages, for situations where normal Go error behavior is appropriate. -func (err operationError) Error() string { - return fmt.Sprintf("errors during %s: %s", err.OpName, err.Diags.Err().Error()) -} - -// ErrorDetail is like Error except it includes verbosely-rendered diagnostics -// similar to what would come from a normal Terraform run, which include -// additional context not included in Error(). -func (err operationError) ErrorDetail() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "errors during %s:", err.OpName) - clr := &colorstring.Colorize{Disable: true, Colors: colorstring.DefaultColors} - for _, diag := range err.Diags { - diagStr := format.Diagnostic(diag, nil, clr, 78) - buf.WriteByte('\n') - buf.WriteString(diagStr) - } - return buf.String() -} - -// detailedErrorMessage is a helper for calling ErrorDetail on an error if -// it is an operationError or just taking Error otherwise. 
-func detailedErrorMessage(err error) string { - switch tErr := err.(type) { - case operationError: - return tErr.ErrorDetail() - default: - return err.Error() - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go deleted file mode 100644 index c3893798d23..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go +++ /dev/null @@ -1,404 +0,0 @@ -package resource - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "log" - "sort" - "strings" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/states" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/tfdiags" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/terraform" -) - -// testStepConfig runs a config-mode test step -func testStepConfig( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - return testStep(opts, state, step) -} - -func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) { - if !step.Destroy { - if err := testStepTaint(state, step); err != nil { - return state, err - } - } - - cfg, err := testConfig(opts, step) - if err != nil { - return state, err - } - - var stepDiags tfdiags.Diagnostics - - // Build the context - opts.Config = cfg - opts.State, err = terraform.ShimLegacyState(state) - if err != nil { - return nil, err - } - - opts.Destroy = step.Destroy - ctx, stepDiags := terraform.NewContext(&opts) - if stepDiags.HasErrors() { - return state, fmt.Errorf("Error initializing context: %s", stepDiags.Err()) - } - if stepDiags := ctx.Validate(); len(stepDiags) > 0 { - if stepDiags.HasErrors() { - return state, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err()) - } - - log.Printf("[WARN] Config warnings:\n%s", stepDiags) - } - - // Refresh! - newState, stepDiags := ctx.Refresh() - // shim the state first so the test can check the state on errors - - state, err = shimNewState(newState, step.providers) - if err != nil { - return nil, err - } - if stepDiags.HasErrors() { - return state, newOperationError("refresh", stepDiags) - } - - // If this step is a PlanOnly step, skip over this first Plan and subsequent - // Apply, and use the follow up Plan that checks for perpetual diffs - if !step.PlanOnly { - // Plan! - if p, stepDiags := ctx.Plan(); stepDiags.HasErrors() { - return state, newOperationError("plan", stepDiags) - } else { - log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes)) - } - - // We need to keep a copy of the state prior to destroying - // such that destroy steps can verify their behavior in the check - // function - stateBeforeApplication := state.DeepCopy() - - // Apply the diff, creating real resources. 
- newState, stepDiags = ctx.Apply() - // shim the state first so the test can check the state on errors - state, err = shimNewState(newState, step.providers) - if err != nil { - return nil, err - } - if stepDiags.HasErrors() { - return state, newOperationError("apply", stepDiags) - } - - // Run any configured checks - if step.Check != nil { - if step.Destroy { - if err := step.Check(stateBeforeApplication); err != nil { - return state, fmt.Errorf("Check failed: %s", err) - } - } else { - if err := step.Check(state); err != nil { - return state, fmt.Errorf("Check failed: %s", err) - } - } - } - } - - // Now, verify that Plan is now empty and we don't have a perpetual diff issue - // We do this with TWO plans. One without a refresh. - var p *plans.Plan - if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { - return state, newOperationError("follow-up plan", stepDiags) - } - if !p.Changes.Empty() { - if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } else { - return state, fmt.Errorf( - "After applying this step, the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } - } - - // And another after a Refresh. - if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { - newState, stepDiags = ctx.Refresh() - if stepDiags.HasErrors() { - return state, newOperationError("follow-up refresh", stepDiags) - } - - state, err = shimNewState(newState, step.providers) - if err != nil { - return nil, err - } - } - if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { - return state, newOperationError("second follow-up refresh", stepDiags) - } - empty := p.Changes.Empty() - - // Data resources are tricky because they legitimately get instantiated - // during refresh so that they will be already populated during the - // plan walk. Because of this, if we have any data resources in the - // config we'll end up wanting to destroy them again here. This is - // acceptable and expected, and we'll treat it as "empty" for the - // sake of this testing. - if step.Destroy && !empty { - empty = true - for _, change := range p.Changes.Resources { - if change.Addr.Resource.Resource.Mode != addrs.DataResourceMode { - empty = false - break - } - } - } - - if !empty { - if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } else { - return state, fmt.Errorf( - "After applying this step and refreshing, "+ - "the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } - } - - // Made it here, but expected a non-empty plan, fail! - if step.ExpectNonEmptyPlan && empty { - return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!") - } - - // Made it here? Good job test step! - return state, nil -} - -// legacyPlanComparisonString produces a string representation of the changes -// from a plan and a given state togther, as was formerly produced by the -// String method of terraform.Plan. -// -// This is here only for compatibility with existing tests that predate our -// new plan and state types, and should not be used in new tests. Instead, use -// a library like "cmp" to do a deep equality and diff on the two -// data structures. 
-func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string { - return fmt.Sprintf( - "DIFF:\n\n%s\n\nSTATE:\n\n%s", - legacyDiffComparisonString(changes), - state.String(), - ) -} - -// legacyDiffComparisonString produces a string representation of the changes -// from a planned changes object, as was formerly produced by the String method -// of terraform.Diff. -// -// This is here only for compatibility with existing tests that predate our -// new plan types, and should not be used in new tests. Instead, use a library -// like "cmp" to do a deep equality check and diff on the two data structures. -func legacyDiffComparisonString(changes *plans.Changes) string { - // The old string representation of a plan was grouped by module, but - // our new plan structure is not grouped in that way and so we'll need - // to preprocess it in order to produce that grouping. - type ResourceChanges struct { - Current *plans.ResourceInstanceChangeSrc - Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc - } - byModule := map[string]map[string]*ResourceChanges{} - resourceKeys := map[string][]string{} - requiresReplace := map[string][]string{} - var moduleKeys []string - for _, rc := range changes.Resources { - if rc.Action == plans.NoOp { - // We won't mention no-op changes here at all, since the old plan - // model we are emulating here didn't have such a concept. - continue - } - moduleKey := rc.Addr.Module.String() - if _, exists := byModule[moduleKey]; !exists { - moduleKeys = append(moduleKeys, moduleKey) - byModule[moduleKey] = make(map[string]*ResourceChanges) - } - resourceKey := rc.Addr.Resource.String() - if _, exists := byModule[moduleKey][resourceKey]; !exists { - resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey) - byModule[moduleKey][resourceKey] = &ResourceChanges{ - Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc), - } - } - - if rc.DeposedKey == states.NotDeposed { - byModule[moduleKey][resourceKey].Current = rc - } else { - byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc - } - - rr := []string{} - for _, p := range rc.RequiredReplace.List() { - rr = append(rr, hcl2shim.FlatmapKeyFromPath(p)) - } - requiresReplace[resourceKey] = rr - } - sort.Strings(moduleKeys) - for _, ks := range resourceKeys { - sort.Strings(ks) - } - - var buf bytes.Buffer - - for _, moduleKey := range moduleKeys { - rcs := byModule[moduleKey] - var mBuf bytes.Buffer - - for _, resourceKey := range resourceKeys[moduleKey] { - rc := rcs[resourceKey] - - forceNewAttrs := requiresReplace[resourceKey] - - crud := "UPDATE" - if rc.Current != nil { - switch rc.Current.Action { - case plans.DeleteThenCreate: - crud = "DESTROY/CREATE" - case plans.CreateThenDelete: - crud = "CREATE/DESTROY" - case plans.Delete: - crud = "DESTROY" - case plans.Create: - crud = "CREATE" - } - } else { - // We must be working on a deposed object then, in which - // case destroying is the only possible action. 
- crud = "DESTROY" - } - - extra := "" - if rc.Current == nil && len(rc.Deposed) > 0 { - extra = " (deposed only)" - } - - fmt.Fprintf( - &mBuf, "%s: %s%s\n", - crud, resourceKey, extra, - ) - - attrNames := map[string]bool{} - var oldAttrs map[string]string - var newAttrs map[string]string - if rc.Current != nil { - if before := rc.Current.Before; before != nil { - ty, err := before.ImpliedType() - if err == nil { - val, err := before.Decode(ty) - if err == nil { - oldAttrs = hcl2shim.FlatmapValueFromHCL2(val) - for k := range oldAttrs { - attrNames[k] = true - } - } - } - } - if after := rc.Current.After; after != nil { - ty, err := after.ImpliedType() - if err == nil { - val, err := after.Decode(ty) - if err == nil { - newAttrs = hcl2shim.FlatmapValueFromHCL2(val) - for k := range newAttrs { - attrNames[k] = true - } - } - } - } - } - if oldAttrs == nil { - oldAttrs = make(map[string]string) - } - if newAttrs == nil { - newAttrs = make(map[string]string) - } - - attrNamesOrder := make([]string, 0, len(attrNames)) - keyLen := 0 - for n := range attrNames { - attrNamesOrder = append(attrNamesOrder, n) - if len(n) > keyLen { - keyLen = len(n) - } - } - sort.Strings(attrNamesOrder) - - for _, attrK := range attrNamesOrder { - v := newAttrs[attrK] - u := oldAttrs[attrK] - - if v == hcl2shim.UnknownVariableValue { - v = "" - } - // NOTE: we don't support here because we would - // need schema to do that. Excluding sensitive values - // is now done at the UI layer, and so should not be tested - // at the core layer. - - updateMsg := "" - - // This may not be as precise as in the old diff, as it matches - // everything under the attribute that was originally marked as - // ForceNew, but should help make it easier to determine what - // caused replacement here. 
- for _, k := range forceNewAttrs { - if strings.HasPrefix(attrK, k) { - updateMsg = " (forces new resource)" - break - } - } - - fmt.Fprintf( - &mBuf, " %s:%s %#v => %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, v, - updateMsg, - ) - } - } - - if moduleKey == "" { // root module - buf.Write(mBuf.Bytes()) - buf.WriteByte('\n') - continue - } - - fmt.Fprintf(&buf, "%s:\n", moduleKey) - s := bufio.NewScanner(&mBuf) - for s.Scan() { - buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) - } - } - - return buf.String() -} - -func testStepTaint(state *terraform.State, step TestStep) error { - for _, p := range step.Taint { - m := state.RootModule() - if m == nil { - return errors.New("no state") - } - rs, ok := m.Resources[p] - if !ok { - return fmt.Errorf("resource %q not found in state", p) - } - log.Printf("[WARN] Test: Explicitly tainting resource %q", p) - rs.Taint() - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go deleted file mode 100644 index 9a3ef1be029..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go +++ /dev/null @@ -1,232 +0,0 @@ -package resource - -import ( - "fmt" - "log" - "reflect" - "strings" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/terraform" -) - -// testStepImportState runs an import state test step -func testStepImportState( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - - // Determine the ID to import - var importId string - switch { - case step.ImportStateIdFunc != nil: - var err error - importId, err = step.ImportStateIdFunc(state) - if err != nil { - return state, err - } - case step.ImportStateId != "": - importId = step.ImportStateId - default: - resource, err := testResource(step, state) - if err != nil { - return state, err - } - importId = resource.Primary.ID - } - - importPrefix := step.ImportStateIdPrefix - if importPrefix != "" { - importId = fmt.Sprintf("%s%s", importPrefix, importId) - } - - // Setup the context. We initialize with an empty state. We use the - // full config for provider configurations. - cfg, err := testConfig(opts, step) - if err != nil { - return state, err - } - - opts.Config = cfg - - // import tests start with empty state - opts.State = states.NewState() - - ctx, stepDiags := terraform.NewContext(&opts) - if stepDiags.HasErrors() { - return state, stepDiags.Err() - } - - // The test step provides the resource address as a string, so we need - // to parse it to get an addrs.AbsResourceAddress to pass in to the - // import method. 
- traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(step.ResourceName), "", hcl.Pos{}) - if hclDiags.HasErrors() { - return nil, hclDiags - } - importAddr, stepDiags := addrs.ParseAbsResourceInstance(traversal) - if stepDiags.HasErrors() { - return nil, stepDiags.Err() - } - - // Do the import - importedState, stepDiags := ctx.Import(&terraform.ImportOpts{ - // Set the module so that any provider config is loaded - Config: cfg, - - Targets: []*terraform.ImportTarget{ - &terraform.ImportTarget{ - Addr: importAddr, - ID: importId, - }, - }, - }) - if stepDiags.HasErrors() { - log.Printf("[ERROR] Test: ImportState failure: %s", stepDiags.Err()) - return state, stepDiags.Err() - } - - newState, err := shimNewState(importedState, step.providers) - if err != nil { - return nil, err - } - // Go through the new state and verify - if step.ImportStateCheck != nil { - var states []*terraform.InstanceState - for _, r := range newState.RootModule().Resources { - if r.Primary != nil { - is := r.Primary.DeepCopy() - is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type - states = append(states, is) - } - } - if err := step.ImportStateCheck(states); err != nil { - return state, err - } - } - - // Verify that all the states match - if step.ImportStateVerify { - new := newState.RootModule().Resources - old := state.RootModule().Resources - for _, r := range new { - // Find the existing resource - var oldR *terraform.ResourceState - for _, r2 := range old { - if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type { - oldR = r2 - break - } - } - if oldR == nil { - return state, fmt.Errorf( - "Failed state verification, resource with ID %s not found", - r.Primary.ID) - } - - // We'll try our best to find the schema for this resource type - // so we can ignore Removed fields during validation. If we fail - // to find the schema then we won't ignore them and so the test - // will need to rely on explicit ImportStateVerifyIgnore, though - // this shouldn't happen in any reasonable case. - var rsrcSchema *schema.Resource - if providerAddr, diags := addrs.ParseAbsProviderConfigStr(r.Provider); !diags.HasErrors() { - providerType := providerAddr.ProviderConfig.Type.LegacyString() - if provider, ok := step.providers[providerType]; ok { - if provider, ok := provider.(*schema.Provider); ok { - rsrcSchema = provider.ResourcesMap[r.Type] - } - } - } - - // don't add empty flatmapped containers, so we can more easily - // compare the attributes - skipEmpty := func(k, v string) bool { - if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") { - if v == "0" { - return true - } - } - return false - } - - // Compare their attributes - actual := make(map[string]string) - for k, v := range r.Primary.Attributes { - if skipEmpty(k, v) { - continue - } - actual[k] = v - } - - expected := make(map[string]string) - for k, v := range oldR.Primary.Attributes { - if skipEmpty(k, v) { - continue - } - expected[k] = v - } - - // Remove fields we're ignoring - for _, v := range step.ImportStateVerifyIgnore { - for k := range actual { - if strings.HasPrefix(k, v) { - delete(actual, k) - } - } - for k := range expected { - if strings.HasPrefix(k, v) { - delete(expected, k) - } - } - } - - // Also remove any attributes that are marked as "Removed" in the - // schema, if we have a schema to check that against. 
- if rsrcSchema != nil { - for k := range actual { - for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { - if schema.Removed != "" { - delete(actual, k) - break - } - } - } - for k := range expected { - for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { - if schema.Removed != "" { - delete(expected, k) - break - } - } - } - } - - if !reflect.DeepEqual(actual, expected) { - // Determine only the different attributes - for k, v := range expected { - if av, ok := actual[k]; ok && v == av { - delete(expected, k) - delete(actual, k) - } - } - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - return state, fmt.Errorf( - "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ - "\n\n%s\n\n%s", - spewConf.Sdump(actual), spewConf.Sdump(expected)) - } - } - } - - // Return the old state (non-imported) so we don't change anything. - return state, nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go deleted file mode 100644 index e56a5155d10..00000000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go +++ /dev/null @@ -1,84 +0,0 @@ -package resource - -import ( - "sync" - "time" -) - -// Retry is a basic wrapper around StateChangeConf that will just retry -// a function until it no longer returns an error. -func Retry(timeout time.Duration, f RetryFunc) error { - // These are used to pull the error out of the function; need a mutex to - // avoid a data race. - var resultErr error - var resultErrMu sync.Mutex - - c := &StateChangeConf{ - Pending: []string{"retryableerror"}, - Target: []string{"success"}, - Timeout: timeout, - MinTimeout: 500 * time.Millisecond, - Refresh: func() (interface{}, string, error) { - rerr := f() - - resultErrMu.Lock() - defer resultErrMu.Unlock() - - if rerr == nil { - resultErr = nil - return 42, "success", nil - } - - resultErr = rerr.Err - - if rerr.Retryable { - return 42, "retryableerror", nil - } - return nil, "quit", rerr.Err - }, - } - - _, waitErr := c.WaitForState() - - // Need to acquire the lock here to be able to avoid race using resultErr as - // the return value - resultErrMu.Lock() - defer resultErrMu.Unlock() - - // resultErr may be nil because the wait timed out and resultErr was never - // set; this is still an error - if resultErr == nil { - return waitErr - } - // resultErr takes precedence over waitErr if both are set because it is - // more likely to be useful - return resultErr -} - -// RetryFunc is the function retried until it succeeds. -type RetryFunc func() *RetryError - -// RetryError is the required return type of RetryFunc. It forces client code -// to choose whether or not a given error is retryable. -type RetryError struct { - Err error - Retryable bool -} - -// RetryableError is a helper to create a RetryError that's retryable from a -// given error. -func RetryableError(err error) *RetryError { - if err == nil { - return nil - } - return &RetryError{Err: err, Retryable: true} -} - -// NonRetryableError is a helper to create a RetryError that's _not_ retryable -// from a given error. 
-func NonRetryableError(err error) *RetryError { - if err == nil { - return nil - } - return &RetryError{Err: err, Retryable: false} -} diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index a264874a691..9c9dd2b6fed 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -112,9 +112,6 @@ const ( // IBMCloudPlatformType represents IBM Cloud infrastructure. IBMCloudPlatformType PlatformType = "IBMCloud" - - // PacketPlatformType represents Packet infrastructure. - PacketPlatformType PlatformType = "Packet" ) // IBMCloudProviderType is a specific supported IBM Cloud provider cluster type @@ -175,10 +172,6 @@ type PlatformSpec struct { // IBMCloud contains settings specific to the IBMCloud infrastructure provider. // +optional IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` - - // Packet contains settings specific to the Packet infrastructure provider. - // +optional - Packet *PacketPlatformSpec `json:"packet,omitempty"` } // PlatformStatus holds the current status specific to the underlying infrastructure provider @@ -229,10 +222,6 @@ type PlatformStatus struct { // IBMCloud contains settings specific to the IBMCloud infrastructure provider. // +optional IBMCloud *IBMCloudPlatformStatus `json:"ibmcloud,omitempty"` - - // Packet contains settings specific to the Packet infrastructure provider. - // +optional - Packet *PacketPlatformStatus `json:"platform,omitempty"` } // AWSServiceEndpoint store the configuration of a custom url to @@ -449,19 +438,6 @@ type IBMCloudPlatformStatus struct { ProviderType IBMCloudProviderType `json:"providerType,omitempty"` } -// PacketPlatformSpec holds the desired state of the Packet Platform infrastructure provider. -// This only includes fields that can be modified in the cluster. -type PacketPlatformSpec struct{} - -// PacketPlatformStatus holds the current status of the Packet Platform infrastructure provider. -type PacketPlatformStatus struct { - // ProjectID for new Packet resources created for the cluster. - ProjectID string `json:"projectID"` - - // Facility for new Packet resources created for the cluster. - Facility string `json:"facility"` -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // InfrastructureList is diff --git a/vendor/github.com/packethost/packngo/.drone.yml b/vendor/github.com/packethost/packngo/.drone.yml index 84cd7b6dbc0..522261b234a 100644 --- a/vendor/github.com/packethost/packngo/.drone.yml +++ b/vendor/github.com/packethost/packngo/.drone.yml @@ -4,25 +4,16 @@ workspace: pipeline: lint: - image: golang:1.11 + image: golang:1.13 commands: - - go get -v -u github.com/alecthomas/gometalinter - - gometalinter --install - - go get -v -t ./... - - | - gometalinter --disable=gas ./... || : - - | - gometalinter --disable-all --enable=gas ./... || : - - | - gofmt -d . | (! grep '.') || ok=false - - if ! $ok; then exit 1; fi + - make lint BUILD=local build: - image: golang:1.11 + image: golang:1.13 commands: - - go build -i -v ./... + - make build BUILD=local test: - image: golang:1.11 + image: golang:1.13 commands: - - go test ./... 
+ - make test BUILD=local diff --git a/vendor/github.com/packethost/packngo/Makefile b/vendor/github.com/packethost/packngo/Makefile new file mode 100644 index 00000000000..99f7a8abae6 --- /dev/null +++ b/vendor/github.com/packethost/packngo/Makefile @@ -0,0 +1,36 @@ +IMG ?= golang:1.13 + +# enable go modules, disabled CGO + +GOENV ?= GO111MODULE=on CGO_ENABLED=0 +export GO111MODULE=on +export CGO_ENABLED=0 + +# we build in a docker image, unless we are set to BUILD=local +GO ?= docker run --rm -v $(PWD):/app -w /app $(IMG) env $(GOENV) +ifeq ($(BUILD),local) +GO = +endif + + +build: + $(GO) go build -i -v ./... + +golangci-lint: +ifeq (, $(shell which golangci-lint)) + $(GO) go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.24.0 +endif + +golint: +ifeq (, $(shell which golint)) + $(GO) go get -u golang.org/x/lint/golint +endif + +lint: golint golangci-lint + $(GO) golangci-lint run --disable-all --enable=golint ./... + $(GO) go vet ./... + $(GO) gofmt -d . + +test: + $(GO) test ./... + diff --git a/vendor/github.com/packethost/packngo/README.md b/vendor/github.com/packethost/packngo/README.md index 307c4eef767..0ffcb4822d2 100644 --- a/vendor/github.com/packethost/packngo/README.md +++ b/vendor/github.com/packethost/packngo/README.md @@ -1,5 +1,5 @@ # packngo -Packet Go Api Client +A Golang client for the Packet API. ![](https://www.packet.net/media/images/xeiw-packettwitterprofilew.png) @@ -46,7 +46,6 @@ This lib is used by the official [terraform-provider-packet](https://github.com/ You can also learn a lot from the `*_test.go` sources. Almost all out tests touch the Packet API, so you can see how auth, querying and POSTing works. For example [devices_test.go](devices_test.go). - Linked resources in Get\* and List\* functions ---------------------------------------------- Most of the Get and List functions have *GetOptions resp. *ListOptions paramters. If you supply them, you can specify which attributes of resources in the return set can be excluded or included. This is useful for linked resources, e.g members of a project, devices in a project. @@ -125,3 +124,21 @@ Committing ---------- Before committing, it's a good idea to run `gofmt -w *.go`. ([gofmt](https://golang.org/cmd/gofmt/)) + +## Building and Testing + +The [Makefile](./Makefile) contains the targets to build, lint and test: + +```sh +make build +make lint +make test +``` + +These normally will be run in a docker image of golang. 
To run locally, just run with `BUILD=local`: + +```sh +make build BUILD=local +make lint BUILD=local +make test BUILD=local +``` diff --git a/vendor/github.com/packethost/packngo/apikeys.go b/vendor/github.com/packethost/packngo/apikeys.go new file mode 100644 index 00000000000..d29b340ecbb --- /dev/null +++ b/vendor/github.com/packethost/packngo/apikeys.go @@ -0,0 +1,133 @@ +package packngo + +import ( + "fmt" +) + +const ( + apiKeyUserBasePath = "/user/api-keys" + apiKeyProjectBasePath = "/projects/%s/api-keys" +) + +// APIKeyService interface defines available device methods +type APIKeyService interface { + UserList(*ListOptions) ([]APIKey, *Response, error) + ProjectList(string, *ListOptions) ([]APIKey, *Response, error) + UserGet(string, *GetOptions) (*APIKey, error) + ProjectGet(string, string, *GetOptions) (*APIKey, error) + Create(*APIKeyCreateRequest) (*APIKey, *Response, error) + Delete(string) (*Response, error) +} + +type apiKeyRoot struct { + APIKeys []APIKey `json:"api_keys"` +} + +type APIKey struct { + ID string `json:"id"` + Description string `json:"description"` + Token string `json:"token"` + ReadOnly bool `json:"read_only"` + Created string `json:"created_at"` + Updated string `json:"updated_at"` + User *User `json:"user"` + Project *Project `json:"project"` +} + +// APIKeyCreateRequest type used to create an api key +type APIKeyCreateRequest struct { + Description string `json:"description"` + ReadOnly bool `json:"read_only"` + ProjectID string `json:"-"` +} + +func (s APIKeyCreateRequest) String() string { + return Stringify(s) +} + +// APIKeyServiceOp implements APIKeyService +type APIKeyServiceOp struct { + client *Client +} + +func (s *APIKeyServiceOp) list(url string, lopts *ListOptions) ([]APIKey, *Response, error) { + root := new(apiKeyRoot) + params := createListOptionsURL(lopts) + paramURL := fmt.Sprintf("%s?%s", url, params) + + resp, err := s.client.DoRequest("GET", paramURL, nil, root) + if err != nil { + return nil, resp, err + } + + return root.APIKeys, resp, err +} + +// ProjectList lists api keys of a project +func (s *APIKeyServiceOp) ProjectList(projectID string, lopts *ListOptions) ([]APIKey, *Response, error) { + return s.list(fmt.Sprintf(apiKeyProjectBasePath, projectID), lopts) + +} + +// UserList returns a user's api keys +func (s *APIKeyServiceOp) UserList(lopts *ListOptions) ([]APIKey, *Response, error) { + return s.list(apiKeyUserBasePath, lopts) +} + +// ProjectGet returns an api key by id +func (s *APIKeyServiceOp) ProjectGet(projectID, apiKeyID string, getOpt *GetOptions) (*APIKey, error) { + var lopts *ListOptions + if getOpt != nil { + lopts = &ListOptions{Includes: getOpt.Includes, Excludes: getOpt.Excludes} + } + pkeys, _, err := s.ProjectList(projectID, lopts) + if err != nil { + return nil, err + } + for _, k := range pkeys { + if k.ID == apiKeyID { + return &k, nil + } + } + return nil, fmt.Errorf("Project (%s) API key %s not found", projectID, apiKeyID) +} + +// UserGet returns a project api key by id +func (s *APIKeyServiceOp) UserGet(apiKeyID string, getOpt *GetOptions) (*APIKey, error) { + var lopts *ListOptions + if getOpt != nil { + lopts = &ListOptions{Includes: getOpt.Includes, Excludes: getOpt.Excludes} + } + ukeys, _, err := s.UserList(lopts) + if err != nil { + return nil, err + } + for _, k := range ukeys { + if k.ID == apiKeyID { + return &k, nil + } + } + return nil, fmt.Errorf("User API key %s not found", apiKeyID) +} + +// Create creates a new api key +func (s *APIKeyServiceOp) Create(createRequest 
*APIKeyCreateRequest) (*APIKey, *Response, error) { + path := apiKeyUserBasePath + if createRequest.ProjectID != "" { + path = fmt.Sprintf(apiKeyProjectBasePath, createRequest.ProjectID) + } + apiKey := new(APIKey) + + resp, err := s.client.DoRequest("POST", path, createRequest, apiKey) + if err != nil { + return nil, resp, err + } + + return apiKey, resp, err +} + +// Delete deletes an api key +func (s *APIKeyServiceOp) Delete(apiKeyID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", apiKeyUserBasePath, apiKeyID) + return s.client.DoRequest("DELETE", path, nil, nil) +} diff --git a/vendor/github.com/packethost/packngo/bgp_configs.go b/vendor/github.com/packethost/packngo/bgp_configs.go index aa21c556138..e5b58b69015 100644 --- a/vendor/github.com/packethost/packngo/bgp_configs.go +++ b/vendor/github.com/packethost/packngo/bgp_configs.go @@ -7,7 +7,7 @@ var bgpConfigBasePath = "/bgp-config" // BGPConfigService interface defines available BGP config methods type BGPConfigService interface { Get(projectID string, getOpt *GetOptions) (*BGPConfig, *Response, error) - Create(string, CreateBGPConfigRequest) (*Response, error) + Create(projectID string, request CreateBGPConfigRequest) (*Response, error) // Delete(configID string) (resp *Response, err error) TODO: Not in Packet API } diff --git a/vendor/github.com/packethost/packngo/bgp_sessions.go b/vendor/github.com/packethost/packngo/bgp_sessions.go index 5562488fe2b..def8ab79808 100644 --- a/vendor/github.com/packethost/packngo/bgp_sessions.go +++ b/vendor/github.com/packethost/packngo/bgp_sessions.go @@ -3,6 +3,7 @@ package packngo import "fmt" var bgpSessionBasePath = "/bgp/sessions" +var bgpNeighborsBasePath = "/bgp/neighbors" // BGPSessionService interface defines available BGP session methods type BGPSessionService interface { @@ -32,6 +33,30 @@ type BGPSession struct { DefaultRoute *bool `json:"default_route,omitempty"` } +type bgpNeighborsRoot struct { + BGPNeighbors []BGPNeighbor `json:"bgp_neighbors"` +} + +// BGPNeighor is struct for listing BGP neighbors of a device +type BGPNeighbor struct { + AddressFamily int `json:"address_family"` + CustomerAs int `json:"customer_as"` + CustomerIP string `json:"customer_ip"` + Md5Enabled bool `json:"md5_enabled"` + Md5Password string `json:"md5_password"` + Multihop bool `json:"multihop"` + PeerAs int `json:"peer_as"` + PeerIps []string `json:"peer_ips"` + RoutesIn []BGPRoute `json:"routes_in"` + RoutesOut []BGPRoute `json:"routes_out"` +} + +// BGPRoute is a struct for Route in BGP neighbor listing +type BGPRoute struct { + Route string `json:"route"` + Exact bool `json:"exact"` +} + // CreateBGPSessionRequest struct type CreateBGPSessionRequest struct { AddressFamily string `json:"address_family"` diff --git a/vendor/github.com/packethost/packngo/connect.go b/vendor/github.com/packethost/packngo/connect.go deleted file mode 100644 index 28e72d1f496..00000000000 --- a/vendor/github.com/packethost/packngo/connect.go +++ /dev/null @@ -1,148 +0,0 @@ -package packngo - -import "fmt" - -const ( - connectBasePath = "/packet-connect/connections" - AzureProviderID = "ed5de8e0-77a9-4d3b-9de0-65281d3aa831" -) - -type ConnectService interface { - List(string, *ListOptions) ([]Connect, *Response, error) - Get(string, string, *GetOptions) (*Connect, *Response, error) - Delete(string, string) (*Response, error) - Create(*ConnectCreateRequest) (*Connect, *Response, error) - Provision(string, string) (*Connect, *Response, error) - Deprovision(string, string, bool) (*Connect, *Response, error) -} - 
-type ConnectCreateRequest struct { - Name string `json:"name"` - ProjectID string `json:"project_id"` - ProviderID string `json:"provider_id"` - ProviderPayload string `json:"provider_payload"` - Facility string `json:"facility"` - PortSpeed int `json:"port_speed"` - VLAN int `json:"vlan"` - Tags []string `json:"tags,omitempty"` - Description string `json:"description,omitempty"` -} - -type Connect struct { - ID string `json:"id"` - Status string `json:"status"` - Name string `json:"name"` - ProjectID string `json:"project_id"` - ProviderID string `json:"provider_id"` - ProviderPayload string `json:"provider_payload"` - Facility string `json:"facility"` - PortSpeed int `json:"port_speed"` - VLAN int `json:"vlan"` - Description string `json:"description,omitempty"` -} - -type ConnectServiceOp struct { - client *Client -} - -type connectsRoot struct { - Connects []Connect `json:"connections"` - Meta meta `json:"meta"` -} - -func (c *ConnectServiceOp) List(projectID string, listOpt *ListOptions) (connects []Connect, resp *Response, err error) { - params := createListOptionsURL(listOpt) - - project_param := fmt.Sprintf("project_id=%s", projectID) - if params == "" { - params = project_param - } else { - params = fmt.Sprintf("%s&%s", params, project_param) - } - path := fmt.Sprintf("%s/?%s", connectBasePath, params) - - for { - subset := new(connectsRoot) - - resp, err = c.client.DoRequest("GET", path, nil, subset) - if err != nil { - return nil, resp, err - } - - connects = append(connects, subset.Connects...) - - if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) { - path = subset.Meta.Next.Href - if params != "" { - path = fmt.Sprintf("%s&%s", path, params) - } - continue - } - - return - } -} - -func (c *ConnectServiceOp) Deprovision(connectID, projectID string, delete bool) (*Connect, *Response, error) { - params := fmt.Sprintf("project_id=%s&delete=%t", projectID, delete) - path := fmt.Sprintf("%s/%s/deprovision?%s", connectBasePath, connectID, params) - connect := new(Connect) - - resp, err := c.client.DoRequest("POST", path, nil, connect) - if err != nil { - return nil, resp, err - } - - return connect, resp, err -} - -func (c *ConnectServiceOp) Provision(connectID, projectID string) (*Connect, *Response, error) { - params := fmt.Sprintf("project_id=%s", projectID) - path := fmt.Sprintf("%s/%s/provision?%s", connectBasePath, connectID, params) - connect := new(Connect) - - resp, err := c.client.DoRequest("POST", path, nil, connect) - if err != nil { - return nil, resp, err - } - - return connect, resp, err -} - -func (c *ConnectServiceOp) Create(createRequest *ConnectCreateRequest) (*Connect, *Response, error) { - url := fmt.Sprintf("%s", connectBasePath) - connect := new(Connect) - - resp, err := c.client.DoRequest("POST", url, createRequest, connect) - if err != nil { - return nil, resp, err - } - - return connect, resp, err -} - -func (c *ConnectServiceOp) Get(connectID, projectID string, getOpt *GetOptions) (*Connect, *Response, error) { - params := createGetOptionsURL(getOpt) - project_param := fmt.Sprintf("project_id=%s", projectID) - if params == "" { - params = project_param - } else { - params = fmt.Sprintf("%s&%s", params, project_param) - } - path := fmt.Sprintf("%s/%s?%s", connectBasePath, connectID, params) - connect := new(Connect) - - resp, err := c.client.DoRequest("GET", path, nil, connect) - if err != nil { - return nil, resp, err - } - - return connect, resp, err -} - -func (c *ConnectServiceOp) Delete(connectID, projectID string) (*Response, error) 
{ - path := fmt.Sprintf("%s/%s?project_id=%s", connectBasePath, connectID, - projectID) - - return c.client.DoRequest("DELETE", path, nil, nil) -} diff --git a/vendor/github.com/packethost/packngo/devices.go b/vendor/github.com/packethost/packngo/devices.go index 239efa59d49..a0e8c78a359 100644 --- a/vendor/github.com/packethost/packngo/devices.go +++ b/vendor/github.com/packethost/packngo/devices.go @@ -1,7 +1,6 @@ package packngo import ( - "encoding/json" "fmt" ) @@ -13,13 +12,14 @@ type DeviceService interface { Get(DeviceID string, getOpt *GetOptions) (*Device, *Response, error) Create(*DeviceCreateRequest) (*Device, *Response, error) Update(string, *DeviceUpdateRequest) (*Device, *Response, error) - Delete(string) (*Response, error) + Delete(string, bool) (*Response, error) Reboot(string) (*Response, error) PowerOff(string) (*Response, error) PowerOn(string) (*Response, error) Lock(string) (*Response, error) Unlock(string) (*Response, error) ListBGPSessions(deviceID string, listOpt *ListOptions) ([]BGPSession, *Response, error) + ListBGPNeighbors(deviceID string, listOpt *ListOptions) ([]BGPNeighbor, *Response, error) ListEvents(string, *ListOptions) ([]Event, *Response, error) } @@ -28,8 +28,8 @@ type devicesRoot struct { Meta meta `json:"meta"` } -// DeviceRaw represents a Packet device from API -type DeviceRaw struct { +// Device represents a Packet device from API +type Device struct { ID string `json:"id"` Href string `json:"href,omitempty"` Hostname string `json:"hostname,omitempty"` @@ -38,7 +38,7 @@ type DeviceRaw struct { Updated string `json:"updated_at,omitempty"` Locked bool `json:"locked,omitempty"` BillingCycle string `json:"billing_cycle,omitempty"` - Storage map[string]interface{} `json:"storage,omitempty"` + Storage *CPR `json:"storage,omitempty"` Tags []string `json:"tags,omitempty"` Network []*IPAddressAssignment `json:"ip_addresses"` Volumes []*Volume `json:"volumes"` @@ -60,27 +60,7 @@ type DeviceRaw struct { CustomData map[string]interface{} `json:"customdata,omitempty"` SSHKeys []SSHKey `json:"ssh_keys,omitempty"` ShortID string `json:"short_id,omitempty"` -} - -type Device struct { - DeviceRaw - NetworkType string -} - -func (d *Device) UnmarshalJSON(b []byte) error { - dJSON := DeviceRaw{} - if err := json.Unmarshal(b, &dJSON); err != nil { - return err - } - d.DeviceRaw = dJSON - if len(dJSON.NetworkPorts) > 0 { - networkType, err := dJSON.GetNetworkType() - if err != nil { - return err - } - d.NetworkType = networkType - } - return nil + SwitchUUID string `json:"switch_uuid,omitempty"` } type NetworkInfo struct { @@ -112,21 +92,124 @@ func (d Device) String() string { return Stringify(d) } -func (d DeviceRaw) GetNetworkType() (string, error) { - if len(d.NetworkPorts) == 0 { - return "", fmt.Errorf("Device has no network ports listed") - } +func (d *Device) NumOfBonds() int { + numOfBonds := 0 for _, p := range d.NetworkPorts { - if p.Name == "bond0" { - return p.NetworkType, nil + if p.Type == "NetworkBondPort" { + numOfBonds++ } } - return "", fmt.Errorf("Bound port not found") + return numOfBonds +} + +func (d *Device) GetPortsInBond(name string) map[string]*Port { + ports := map[string]*Port{} + for _, port := range d.NetworkPorts { + if port.Bond != nil && port.Bond.Name == name { + p := port + ports[p.Name] = &p + } + } + return ports +} + +func (d *Device) GetBondPorts() map[string]*Port { + ports := map[string]*Port{} + for _, port := range d.NetworkPorts { + if port.Type == "NetworkBondPort" { + p := port + ports[p.Name] = &p + } + } + return ports 
+} + +func (d *Device) GetPhysicalPorts() map[string]*Port { + ports := map[string]*Port{} + for _, port := range d.NetworkPorts { + if port.Type == "NetworkPort" { + p := port + ports[p.Name] = &p + } + } + return ports +} + +func (d *Device) GetPortByName(name string) (*Port, error) { + for _, port := range d.NetworkPorts { + if port.Name == name { + return &port, nil + } + } + return nil, fmt.Errorf("Port %s not found in device %s", name, d.ID) +} + +func (d *Device) GetNetworkType() (string, error) { + numOfBonds := d.NumOfBonds() + if (numOfBonds < 1) || (numOfBonds > 2) { + return "", fmt.Errorf("Wrong number of Bond ports") + } + bond0, err := d.GetPortByName("bond0") + if err != nil { + return "", err + } + if numOfBonds == 2 { + bond1, err := d.GetPortByName("bond1") + if err != nil { + return "", err + } + if bond0.NetworkType == bond1.NetworkType { + return bond0.NetworkType, nil + } + if (bond0.NetworkType == "layer3") && (bond1.NetworkType == "layer2-individual") { + return "hybrid", nil + } + return "", fmt.Errorf("Strange 2-bond ports conf - bond0: %s, bond1: %s", bond0.NetworkType, bond1.NetworkType) + } + return bond0.NetworkType, nil } type IPAddressCreateRequest struct { - AddressFamily int `json:"address_family"` - Public bool `json:"public"` + AddressFamily int `json:"address_family"` + Public bool `json:"public"` + CIDR int `json:"cidr,omitempty"` + Reservations []string `json:"ip_reservations,omitempty"` +} + +// CPR is a struct for custom partitioning and RAID +// If you don't want to bother writing the struct, just write the CPR conf to +// a string and then do +// +// var cpr CPR +// err := json.Unmarshal([]byte(cprString), &cpr) +// if err != nil { +// log.Fatal(err) +// } +type CPR struct { + Disks []struct { + Device string `json:"device"` + WipeTable bool `json:"wipeTable"` + Partitions []struct { + Label string `json:"label"` + Number int `json:"number"` + Size string `json:"size"` + } `json:"partitions"` + } `json:"disks"` + Raid []struct { + Devices []string `json:"devices"` + Level string `json:"level"` + Name string `json:"name"` + } `json:"raid,omitempty"` + Filesystems []struct { + Mount struct { + Device string `json:"device"` + Format string `json:"format"` + Point string `json:"point"` + Create struct { + Options []string `json:"options"` + } `json:"create"` + } `json:"mount"` + } `json:"filesystems"` } // DeviceCreateRequest type used to create a Packet device @@ -138,7 +221,7 @@ type DeviceCreateRequest struct { BillingCycle string `json:"billing_cycle"` ProjectID string `json:"project_id"` UserData string `json:"userdata"` - Storage string `json:"storage,omitempty"` + Storage *CPR `json:"storage,omitempty"` Tags []string `json:"tags"` IPXEScriptURL string `json:"ipxe_script_url,omitempty"` PublicIPv4SubnetSize int `json:"public_ipv4_subnet_size,omitempty"` @@ -151,7 +234,7 @@ type DeviceCreateRequest struct { // UserSSHKeys is a list of user UUIDs - essentialy a list of // collaborators. The users must be a collaborator in the same project // where the device is created. The user's SSH keys then go to the - // device. + // device UserSSHKeys []string `json:"user_ssh_keys,omitempty"` // Project SSHKeys is a list of SSHKeys resource UUIDs. If this param // is supplied, only the listed SSHKeys will go to the device. 
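The hunk above switches `DeviceCreateRequest.Storage` from a raw string to the typed `*CPR` struct, and the `CPR` doc comment itself suggests decoding an existing JSON partitioning config instead of building the struct field by field. A minimal sketch of that pattern, assuming an illustrative JSON config and a hypothetical project UUID (neither value comes from the patch):

```go
package main

import (
	"encoding/json"
	"log"

	"github.com/packethost/packngo"
)

func main() {
	// Illustrative custom partitioning config; the keys mirror the CPR struct tags.
	cprJSON := `{
	  "disks": [
	    {
	      "device": "/dev/sda",
	      "wipeTable": true,
	      "partitions": [
	        {"label": "ROOT", "number": 1, "size": "0"}
	      ]
	    }
	  ]
	}`

	var cpr packngo.CPR
	if err := json.Unmarshal([]byte(cprJSON), &cpr); err != nil {
		log.Fatal(err)
	}

	// With Storage now *CPR, the decoded struct is attached directly to the
	// create request rather than passed as an opaque string.
	req := packngo.DeviceCreateRequest{
		ProjectID:    "00000000-0000-0000-0000-000000000000", // hypothetical project UUID
		BillingCycle: "hourly",
		Storage:      &cpr,
	}
	_ = req
}
```

Typing the field this way lets callers validate the partitioning layout at compile time while still accepting the familiar JSON form via `json.Unmarshal`.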
@@ -183,6 +266,10 @@ type DeviceActionRequest struct { Type string `json:"type"` } +type DeviceDeleteRequest struct { + Force bool `json:"force_delete"` +} + func (d DeviceActionRequest) String() string { return Stringify(d) } @@ -260,10 +347,11 @@ func (s *DeviceServiceOp) Update(deviceID string, updateRequest *DeviceUpdateReq } // Delete deletes a device -func (s *DeviceServiceOp) Delete(deviceID string) (*Response, error) { +func (s *DeviceServiceOp) Delete(deviceID string, force bool) (*Response, error) { path := fmt.Sprintf("%s/%s", deviceBasePath, deviceID) + req := &DeviceDeleteRequest{Force: force} - return s.client.DoRequest("DELETE", path, nil, nil) + return s.client.DoRequest("DELETE", path, req, nil) } // Reboot reboots on a device @@ -310,6 +398,19 @@ func (s *DeviceServiceOp) Unlock(deviceID string) (*Response, error) { return s.client.DoRequest("PATCH", path, action, nil) } +func (s *DeviceServiceOp) ListBGPNeighbors(deviceID string, listOpt *ListOptions) ([]BGPNeighbor, *Response, error) { + root := new(bgpNeighborsRoot) + params := createListOptionsURL(listOpt) + path := fmt.Sprintf("%s/%s%s?%s", deviceBasePath, deviceID, bgpNeighborsBasePath, params) + + resp, err := s.client.DoRequest("GET", path, nil, root) + if err != nil { + return nil, resp, err + } + + return root.BGPNeighbors, resp, err +} + // ListBGPSessions returns all BGP Sessions associated with the device func (s *DeviceServiceOp) ListBGPSessions(deviceID string, listOpt *ListOptions) (bgpSessions []BGPSession, resp *Response, err error) { params := createListOptionsURL(listOpt) diff --git a/vendor/github.com/packethost/packngo/go.mod b/vendor/github.com/packethost/packngo/go.mod index 10ef6856dde..8a05f1a4007 100644 --- a/vendor/github.com/packethost/packngo/go.mod +++ b/vendor/github.com/packethost/packngo/go.mod @@ -1,7 +1,9 @@ module github.com/packethost/packngo +go 1.13 + require ( - github.com/stretchr/testify v1.3.0 - golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 - golang.org/x/sys v0.0.0-20190209173611-3b5209105503 // indirect + github.com/hashicorp/go-retryablehttp v0.6.6 + github.com/stretchr/testify v1.5.1 + golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a ) diff --git a/vendor/github.com/packethost/packngo/go.sum b/vendor/github.com/packethost/packngo/go.sum index ec31c502166..b2d833de6a0 100644 --- a/vendor/github.com/packethost/packngo/go.sum +++ b/vendor/github.com/packethost/packngo/go.sum @@ -1,11 +1,27 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a h1:y6sBfNd1b9Wy08a6K1Z1DZc4aXABUN5TKjkYhz7UKmo= +golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/packethost/packngo/ip.go b/vendor/github.com/packethost/packngo/ip.go index edb3addb302..43e4dfe3e28 100644 --- a/vendor/github.com/packethost/packngo/ip.go +++ b/vendor/github.com/packethost/packngo/ip.go @@ -6,11 +6,27 @@ import ( const ipBasePath = "/ips" +const ( + // PublicIPv4 fixed string representation of public ipv4 + PublicIPv4 = "public_ipv4" + // PrivateIPv4 fixed string representation of private ipv4 + PrivateIPv4 = "private_ipv4" + // GlobalIPv4 fixed string representation of global ipv4 + GlobalIPv4 = "global_ipv4" + // PublicIPv6 fixed string representation of public ipv6 + PublicIPv6 = "public_ipv6" + // PrivateIPv6 fixed string representation of private ipv6 + PrivateIPv6 = "private_ipv6" + // GlobalIPv6 fixed string representation of global ipv6 + GlobalIPv6 = "global_ipv6" +) + // DeviceIPService handles assignment of addresses from reserved blocks to instances in a project. type DeviceIPService interface { Assign(deviceID string, assignRequest *AddressStruct) (*IPAddressAssignment, *Response, error) Unassign(assignmentID string) (*Response, error) Get(assignmentID string, getOpt *GetOptions) (*IPAddressAssignment, *Response, error) + List(deviceID string, listOpt *ListOptions) ([]IPAddressAssignment, *Response, error) } // ProjectIPService handles reservation of IP address blocks for a project. 
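The ip.go hunk above adds fixed address-type constants and a `List` method to `DeviceIPService` (its implementation follows in the next hunk). A rough usage sketch, assuming credentials are already configured for `NewClient` and using a hypothetical device UUID:

```go
package main

import (
	"fmt"
	"log"

	"github.com/packethost/packngo"
)

func main() {
	// NewClient picks its credentials up from the environment.
	c, err := packngo.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	deviceID := "00000000-0000-0000-0000-000000000000" // hypothetical device UUID

	// List is the method added to DeviceIPService in this hunk; a nil
	// ListOptions means no include/exclude filtering.
	assignments, _, err := c.DeviceIPs.List(deviceID, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range assignments {
		fmt.Println(a.Address, a.Public, a.AddressFamily)
	}
}
```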
@@ -22,22 +38,24 @@ type ProjectIPService interface { AvailableAddresses(ipReservationID string, r *AvailableRequest) ([]string, *Response, error) } -type IpAddressCommon struct { - ID string `json:"id"` - Address string `json:"address"` - Gateway string `json:"gateway"` - Network string `json:"network"` - AddressFamily int `json:"address_family"` - Netmask string `json:"netmask"` - Public bool `json:"public"` - CIDR int `json:"cidr"` - Created string `json:"created_at,omitempty"` - Updated string `json:"updated_at,omitempty"` - Href string `json:"href"` - Management bool `json:"management"` - Manageable bool `json:"manageable"` - Project Href `json:"project"` - Global *bool `json:"global_ip"` +type IpAddressCommon struct { //nolint:golint + ID string `json:"id"` + Address string `json:"address"` + Gateway string `json:"gateway"` + Network string `json:"network"` + AddressFamily int `json:"address_family"` + Netmask string `json:"netmask"` + Public bool `json:"public"` + CIDR int `json:"cidr"` + Created string `json:"created_at,omitempty"` + Updated string `json:"updated_at,omitempty"` + Href string `json:"href"` + Management bool `json:"management"` + Manageable bool `json:"manageable"` + Project Href `json:"project"` + Global *bool `json:"global_ip"` + Tags []string `json:"tags,omitempty"` + CustomData map[string]interface{} `json:"customdata,omitempty"` } // IPAddressReservation is created when user sends IP reservation request for a project (considering it's within quota). @@ -69,10 +87,12 @@ type IPAddressAssignment struct { // IPReservationRequest represents the body of a reservation request. type IPReservationRequest struct { - Type string `json:"type"` - Quantity int `json:"quantity"` - Description string `json:"details,omitempty"` - Facility *string `json:"facility,omitempty"` + Type string `json:"type"` + Quantity int `json:"quantity"` + Description string `json:"details,omitempty"` + Facility *string `json:"facility,omitempty"` + Tags []string `json:"tags,omitempty"` + CustomData map[string]interface{} `json:"customdata,omitempty"` } // AddressStruct is a helper type for request/response with dict like {"address": ... } @@ -134,6 +154,27 @@ func (i *DeviceIPServiceOp) Get(assignmentID string, getOpt *GetOptions) (*IPAdd return ipa, resp, err } +// List list all of the IP address assignments on a device +func (i *DeviceIPServiceOp) List(deviceID string, listOpt *ListOptions) ([]IPAddressAssignment, *Response, error) { + params := createListOptionsURL(listOpt) + + path := fmt.Sprintf("%s/%s%s?%s", deviceBasePath, deviceID, ipBasePath, params) + + //ipList represents collection of IP Address reservations + type ipList struct { + IPs []IPAddressAssignment `json:"ip_addresses,omitempty"` + } + + ips := new(ipList) + + resp, err := i.client.DoRequest("GET", path, nil, ips) + if err != nil { + return nil, resp, err + } + + return ips.IPs, resp, err +} + // ProjectIPServiceOp is interface for IP assignment methods. 
type ProjectIPServiceOp struct { client *Client diff --git a/vendor/github.com/packethost/packngo/packngo.go b/vendor/github.com/packethost/packngo/packngo.go index 47b89a29fa4..a41975f7540 100644 --- a/vendor/github.com/packethost/packngo/packngo.go +++ b/vendor/github.com/packethost/packngo/packngo.go @@ -2,6 +2,8 @@ package packngo import ( "bytes" + "context" + "crypto/x509" "encoding/json" "fmt" "io" @@ -11,9 +13,12 @@ import ( "net/http/httputil" "net/url" "os" + "regexp" "strconv" "strings" "time" + + "github.com/hashicorp/go-retryablehttp" ) const ( @@ -29,6 +34,8 @@ const ( headerRateReset = "X-RateLimit-Reset" ) +var redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`) + type GetOptions struct { Includes []string Excludes []string @@ -163,7 +170,7 @@ func (r *ErrorResponse) Error() string { // Client is the base API Client type Client struct { - client *http.Client + client *retryablehttp.Client debug bool BaseURL *url.URL @@ -175,37 +182,37 @@ type Client struct { RateLimit Rate // Packet Api Objects - Plans PlanService - Users UserService - Emails EmailService - SSHKeys SSHKeyService + APIKeys APIKeyService + BGPConfig BGPConfigService + BGPSessions BGPSessionService + Batches BatchService + CapacityService CapacityService + DeviceIPs DeviceIPService + DevicePorts DevicePortService Devices DeviceService - Projects ProjectService + Emails EmailService + Events EventService Facilities FacilityService + HardwareReservations HardwareReservationService + Notifications NotificationService OperatingSystems OSService - DeviceIPs DeviceIPService - DevicePorts DevicePortService + Organizations OrganizationService + Plans PlanService ProjectIPs ProjectIPService ProjectVirtualNetworks ProjectVirtualNetworkService - Volumes VolumeService - VolumeAttachments VolumeAttachmentService + Projects ProjectService + SSHKeys SSHKeyService SpotMarket SpotMarketService SpotMarketRequests SpotMarketRequestService - Organizations OrganizationService - BGPSessions BGPSessionService - BGPConfig BGPConfigService - CapacityService CapacityService - Batches BatchService TwoFactorAuth TwoFactorAuthService + Users UserService VPN VPNService - HardwareReservations HardwareReservationService - Events EventService - Notifications NotificationService - Connects ConnectService + VolumeAttachments VolumeAttachmentService + Volumes VolumeService } // NewRequest inits a new http request with the proper headers -func (c *Client) NewRequest(method, path string, body interface{}) (*http.Request, error) { +func (c *Client) NewRequest(method, path string, body interface{}) (*retryablehttp.Request, error) { // relative path to append to the endpoint url, no leading slash please rel, err := url.Parse(path) if err != nil { @@ -223,7 +230,7 @@ func (c *Client) NewRequest(method, path string, body interface{}) (*http.Reques } } - req, err := http.NewRequest(method, u.String(), buf) + req, err := retryablehttp.NewRequest(method, u.String(), buf) if err != nil { return nil, err } @@ -240,7 +247,7 @@ func (c *Client) NewRequest(method, path string, body interface{}) (*http.Reques } // Do executes the http request -func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) { +func (c *Client) Do(req *retryablehttp.Request, v interface{}) (*Response, error) { resp, err := c.client.Do(req) if err != nil { return nil, err @@ -251,8 +258,7 @@ func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) { response := Response{Response: resp} response.populateRate() if c.debug { - o, _ := 
httputil.DumpResponse(response.Response, true) - log.Printf("\n=======[RESPONSE]============\n%s\n\n", string(o)) + dumpResponse(response.Response) } c.RateLimit = response.Rate @@ -277,13 +283,35 @@ func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) { return &response, err } +func dumpResponse(resp *http.Response) { + o, _ := httputil.DumpResponse(resp, true) + strResp := string(o) + reg, _ := regexp.Compile(`"token":(.+?),`) + reMatches := reg.FindStringSubmatch(strResp) + if len(reMatches) == 2 { + strResp = strings.Replace(strResp, reMatches[1], strings.Repeat("-", len(reMatches[1])), 1) + } + log.Printf("\n=======[RESPONSE]============\n%s\n\n", strResp) +} + +func dumpRequest(req *retryablehttp.Request) { + o, _ := httputil.DumpRequestOut(req.Request, false) + strReq := string(o) + reg, _ := regexp.Compile(`X-Auth-Token: (\w*)`) + reMatches := reg.FindStringSubmatch(strReq) + if len(reMatches) == 2 { + strReq = strings.Replace(strReq, reMatches[1], strings.Repeat("-", len(reMatches[1])), 1) + } + bbs, _ := req.BodyBytes() + log.Printf("\n=======[REQUEST]=============\n%s%s\n", strReq, string(bbs)) +} + // DoRequest is a convenience method, it calls NewRequest followed by Do // v is the interface to unmarshal the response JSON into func (c *Client) DoRequest(method, path string, body, v interface{}) (*Response, error) { req, err := c.NewRequest(method, path, body) if c.debug { - o, _ := httputil.DumpRequestOut(req, true) - log.Printf("\n=======[REQUEST]=============\n%s\n", string(o)) + dumpRequest(req) } if err != nil { return nil, err @@ -299,8 +327,7 @@ func (c *Client) DoRequestWithHeader(method string, headers map[string]string, p } if c.debug { - o, _ := httputil.DumpRequestOut(req, true) - log.Printf("\n=======[REQUEST]=============\n%s\n", string(o)) + dumpRequest(req) } if err != nil { return nil, err @@ -323,19 +350,57 @@ func NewClient() (*Client, error) { // N.B.: Packet's API certificate requires Go 1.5+ to successfully parse. If you are using // an older version of Go, pass in a custom http.Client with a custom TLS configuration // that sets "InsecureSkipVerify" to "true" -func NewClientWithAuth(consumerToken string, apiKey string, httpClient *http.Client) *Client { +func NewClientWithAuth(consumerToken string, apiKey string, httpClient *retryablehttp.Client) *Client { client, _ := NewClientWithBaseURL(consumerToken, apiKey, httpClient, baseURL) return client } +func PacketRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + + if err != nil { + if v, ok := err.(*url.Error); ok { + // Don't retry if the error was due to too many redirects. + if redirectsErrorRe.MatchString(v.Error()) { + return false, nil + } + + // Don't retry if the error was due to TLS cert verification failure. + if _, ok := v.Err.(x509.UnknownAuthorityError); ok { + return false, nil + } + } + + // The error is likely recoverable so retry. + return true, nil + } + + // Check the response code. We retry on 500-range responses to allow + // the server time to recover, as 500's are typically not permanent + // errors and may relate to outages on the server side. This will catch + // invalid response codes as well, like 0 and 999. 
+ //if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) { + // return true, nil + //} + + return false, nil +} + // NewClientWithBaseURL returns a Client pointing to nonstandard API URL, e.g. // for mocking the remote API -func NewClientWithBaseURL(consumerToken string, apiKey string, httpClient *http.Client, apiBaseURL string) (*Client, error) { +func NewClientWithBaseURL(consumerToken string, apiKey string, httpClient *retryablehttp.Client, apiBaseURL string) (*Client, error) { if httpClient == nil { // Don't fall back on http.DefaultClient as it's not nice to adjust state // implicitly. If the client wants to use http.DefaultClient, they can // pass it in explicitly. - httpClient = &http.Client{} + httpClient = retryablehttp.NewClient() + httpClient.RetryWaitMin = time.Second + httpClient.RetryWaitMax = 30 * time.Second + httpClient.RetryMax = 10 + httpClient.CheckRetry = PacketRetryPolicy } u, err := url.Parse(apiBaseURL) @@ -344,34 +409,34 @@ func NewClientWithBaseURL(consumerToken string, apiKey string, httpClient *http. } c := &Client{client: httpClient, BaseURL: u, UserAgent: userAgent, ConsumerToken: consumerToken, APIKey: apiKey} - c.debug = os.Getenv(debugEnvVar) != "" - c.Plans = &PlanServiceOp{client: c} - c.Organizations = &OrganizationServiceOp{client: c} - c.Users = &UserServiceOp{client: c} - c.Emails = &EmailServiceOp{client: c} - c.SSHKeys = &SSHKeyServiceOp{client: c} + c.APIKeys = &APIKeyServiceOp{client: c} + c.BGPConfig = &BGPConfigServiceOp{client: c} + c.BGPSessions = &BGPSessionServiceOp{client: c} + c.Batches = &BatchServiceOp{client: c} + c.CapacityService = &CapacityServiceOp{client: c} + c.DeviceIPs = &DeviceIPServiceOp{client: c} + c.DevicePorts = &DevicePortServiceOp{client: c} c.Devices = &DeviceServiceOp{client: c} - c.Projects = &ProjectServiceOp{client: c} + c.Emails = &EmailServiceOp{client: c} + c.Events = &EventServiceOp{client: c} c.Facilities = &FacilityServiceOp{client: c} + c.HardwareReservations = &HardwareReservationServiceOp{client: c} + c.Notifications = &NotificationServiceOp{client: c} c.OperatingSystems = &OSServiceOp{client: c} - c.DeviceIPs = &DeviceIPServiceOp{client: c} - c.DevicePorts = &DevicePortServiceOp{client: c} - c.ProjectVirtualNetworks = &ProjectVirtualNetworkServiceOp{client: c} + c.Organizations = &OrganizationServiceOp{client: c} + c.Plans = &PlanServiceOp{client: c} c.ProjectIPs = &ProjectIPServiceOp{client: c} - c.Volumes = &VolumeServiceOp{client: c} - c.VolumeAttachments = &VolumeAttachmentServiceOp{client: c} + c.ProjectVirtualNetworks = &ProjectVirtualNetworkServiceOp{client: c} + c.Projects = &ProjectServiceOp{client: c} + c.SSHKeys = &SSHKeyServiceOp{client: c} c.SpotMarket = &SpotMarketServiceOp{client: c} - c.BGPSessions = &BGPSessionServiceOp{client: c} - c.BGPConfig = &BGPConfigServiceOp{client: c} - c.CapacityService = &CapacityServiceOp{client: c} - c.Batches = &BatchServiceOp{client: c} + c.SpotMarketRequests = &SpotMarketRequestServiceOp{client: c} c.TwoFactorAuth = &TwoFactorAuthServiceOp{client: c} + c.Users = &UserServiceOp{client: c} c.VPN = &VPNServiceOp{client: c} - c.HardwareReservations = &HardwareReservationServiceOp{client: c} - c.SpotMarketRequests = &SpotMarketRequestServiceOp{client: c} - c.Events = &EventServiceOp{client: c} - c.Notifications = &NotificationServiceOp{client: c} - c.Connects = &ConnectServiceOp{client: c} + c.VolumeAttachments = &VolumeAttachmentServiceOp{client: c} + c.Volumes = &VolumeServiceOp{client: c} + c.debug = os.Getenv(debugEnvVar) != "" 
return c, nil } diff --git a/vendor/github.com/packethost/packngo/ports.go b/vendor/github.com/packethost/packngo/ports.go index 5c0bab89819..d4c8a94879f 100644 --- a/vendor/github.com/packethost/packngo/ports.go +++ b/vendor/github.com/packethost/packngo/ports.go @@ -2,6 +2,7 @@ package packngo import ( "fmt" + "strings" ) const portBasePath = "/ports" @@ -12,16 +13,15 @@ type DevicePortService interface { Unassign(*PortAssignRequest) (*Port, *Response, error) AssignNative(*PortAssignRequest) (*Port, *Response, error) UnassignNative(string) (*Port, *Response, error) - Bond(*BondRequest) (*Port, *Response, error) - Disbond(*DisbondRequest) (*Port, *Response, error) + Bond(*Port, bool) (*Port, *Response, error) + Disbond(*Port, bool) (*Port, *Response, error) DeviceToNetworkType(string, string) (*Device, error) DeviceNetworkType(string) (string, error) - PortToLayerTwo(string) (*Port, *Response, error) - PortToLayerThree(string) (*Port, *Response, error) - DeviceToLayerTwo(string) (*Device, error) - DeviceToLayerThree(string) (*Device, error) - GetBondPort(string) (*Port, error) + PortToLayerTwo(string, string) (*Port, *Response, error) + PortToLayerThree(string, string) (*Port, *Response, error) GetPortByName(string, string) (*Port, error) + Convert1BondDevice(*Device, string) error + Convert2BondDevice(*Device, string) error } type PortData struct { @@ -29,6 +29,11 @@ type PortData struct { Bonded bool `json:"bonded"` } +type BondData struct { + ID string `json:"id"` + Name string `json:"name"` +} + type Port struct { ID string `json:"id"` Type string `json:"type"` @@ -37,6 +42,7 @@ type Port struct { NetworkType string `json:"network_type,omitempty"` NativeVirtualNetwork *VirtualNetwork `json:"native_virtual_network"` AttachedVirtualNetworks []VirtualNetwork `json:"virtual_networks"` + Bond *BondData `json:"bond"` } type AddressRequest struct { @@ -67,32 +73,12 @@ type DisbondRequest struct { BulkDisable bool `json:"bulk_disable"` } -func (i *DevicePortServiceOp) GetBondPort(deviceID string) (*Port, error) { - device, _, err := i.client.Devices.Get(deviceID, nil) - if err != nil { - return nil, err - } - for _, port := range device.NetworkPorts { - if port.Type == "NetworkBondPort" { - return &port, nil - } - } - - return nil, fmt.Errorf("No bonded port found in device %s", deviceID) -} - func (i *DevicePortServiceOp) GetPortByName(deviceID, name string) (*Port, error) { device, _, err := i.client.Devices.Get(deviceID, nil) if err != nil { return nil, err } - for _, port := range device.NetworkPorts { - if port.Name == name { - return &port, nil - } - } - - return nil, fmt.Errorf("Port %s not found in device %s", name, deviceID) + return device.GetPortByName(name) } func (i *DevicePortServiceOp) Assign(par *PortAssignRequest) (*Port, *Response, error) { @@ -122,12 +108,20 @@ func (i *DevicePortServiceOp) Unassign(par *PortAssignRequest) (*Port, *Response return i.portAction(path, par) } -func (i *DevicePortServiceOp) Bond(br *BondRequest) (*Port, *Response, error) { +func (i *DevicePortServiceOp) Bond(p *Port, be bool) (*Port, *Response, error) { + if p.Data.Bonded { + return p, nil, nil + } + br := &BondRequest{PortID: p.ID, BulkEnable: be} path := fmt.Sprintf("%s/%s/bond", portBasePath, br.PortID) return i.portAction(path, br) } -func (i *DevicePortServiceOp) Disbond(dr *DisbondRequest) (*Port, *Response, error) { +func (i *DevicePortServiceOp) Disbond(p *Port, bd bool) (*Port, *Response, error) { + if !p.Data.Bonded { + return p, nil, nil + } + dr := &DisbondRequest{PortID: p.ID, 
BulkDisable: bd} path := fmt.Sprintf("%s/%s/disbond", portBasePath, dr.PortID) return i.portAction(path, dr) } @@ -143,8 +137,15 @@ func (i *DevicePortServiceOp) portAction(path string, req interface{}) (*Port, * return port, resp, err } -func (i *DevicePortServiceOp) PortToLayerTwo(portID string) (*Port, *Response, error) { - path := fmt.Sprintf("%s/%s/convert/layer-2", portBasePath, portID) +func (i *DevicePortServiceOp) PortToLayerTwo(deviceID, portName string) (*Port, *Response, error) { + p, err := i.client.DevicePorts.GetPortByName(deviceID, portName) + if err != nil { + return nil, nil, err + } + if strings.HasPrefix(p.NetworkType, "layer2") { + return p, nil, nil + } + path := fmt.Sprintf("%s/%s/convert/layer-2", portBasePath, p.ID) port := new(Port) resp, err := i.client.DoRequest("POST", path, nil, port) @@ -155,15 +156,22 @@ func (i *DevicePortServiceOp) PortToLayerTwo(portID string) (*Port, *Response, e return port, resp, err } -func (i *DevicePortServiceOp) PortToLayerThree(portID string) (*Port, *Response, error) { - path := fmt.Sprintf("%s/%s/convert/layer-3", portBasePath, portID) +func (i *DevicePortServiceOp) PortToLayerThree(deviceID, portName string) (*Port, *Response, error) { + p, err := i.client.DevicePorts.GetPortByName(deviceID, portName) + if err != nil { + return nil, nil, err + } + if p.NetworkType == "layer3" { + return p, nil, nil + } + path := fmt.Sprintf("%s/%s/convert/layer-3", portBasePath, p.ID) port := new(Port) req := BackToL3Request{ RequestIPs: []AddressRequest{ - AddressRequest{AddressFamily: 4, Public: true}, - AddressRequest{AddressFamily: 4, Public: false}, - AddressRequest{AddressFamily: 6, Public: true}, + {AddressFamily: 4, Public: true}, + {AddressFamily: 4, Public: false}, + {AddressFamily: 6, Public: true}, }, } @@ -180,142 +188,181 @@ func (i *DevicePortServiceOp) DeviceNetworkType(deviceID string) (string, error) if err != nil { return "", err } - return d.NetworkType, nil + return d.GetNetworkType() } -func (i *DevicePortServiceOp) DeviceToNetworkType(deviceID string, nType string) (*Device, error) { - - d, _, err := i.client.Devices.Get(deviceID, nil) - if err != nil { - return nil, err - } - - curType := d.NetworkType +func (i *DevicePortServiceOp) Convert2BondDevice(d *Device, targetType string) error { + bondPorts := d.GetBondPorts() + ethPorts := d.GetPhysicalPorts() - if curType == nType { - return nil, fmt.Errorf("Device already is in state %s", nType) - } - bond0ID := "" - eth1ID := "" - for _, port := range d.NetworkPorts { - if port.Name == "bond0" { - bond0ID = port.ID + if targetType == "layer3" { + for _, p := range ethPorts { + _, _, err := i.client.DevicePorts.Bond(p, false) + if err != nil { + return err + } } - if port.Name == "eth1" { - eth1ID = port.ID + for _, p := range bondPorts { + _, _, err := i.client.DevicePorts.PortToLayerThree(d.ID, p.Name) + if err != nil { + return err + } } } - - if nType == "layer3" { - if curType == "layer2-individual" || curType == "layer2-bonded" { - if curType == "layer2-individual" { - _, _, err := i.client.DevicePorts.Bond( - &BondRequest{PortID: bond0ID, BulkEnable: false}) - if err != nil { - return nil, err - } - - } - _, _, err := i.client.DevicePorts.PortToLayerThree(bond0ID) + if targetType == "hybrid" { + for _, p := range d.GetPortsInBond("bond1") { + _, _, err := i.client.DevicePorts.Disbond(p, false) if err != nil { - return nil, err + return err } } - _, _, err = i.client.DevicePorts.Bond( - &BondRequest{PortID: bond0ID, BulkEnable: true}) + _, _, err := 
i.client.DevicePorts.PortToLayerThree(d.ID, "bond0") + if err != nil { + return err + } + _, _, err = i.client.DevicePorts.PortToLayerTwo(d.ID, "bond1") if err != nil { - return nil, err + return err } } - if nType == "hybrid" { - if curType == "layer2-individual" || curType == "layer2-bonded" { - if curType == "layer2-individual" { - _, _, err = i.client.DevicePorts.Bond( - &BondRequest{PortID: bond0ID, BulkEnable: false}) - if err != nil { - return nil, err - } - } - _, _, err = i.client.DevicePorts.PortToLayerThree(bond0ID) + if targetType == "layer2-individual" { + for _, p := range bondPorts { + _, _, err := i.client.DevicePorts.PortToLayerTwo(d.ID, p.Name) if err != nil { - return nil, err + return err } } - _, _, err := i.client.DevicePorts.Disbond( - &DisbondRequest{PortID: eth1ID, BulkDisable: false}) - if err != nil { - return nil, err + for _, p := range ethPorts { + _, _, err := i.client.DevicePorts.Disbond(p, false) + if err != nil { + return err + } } } - if nType == "layer2-individual" { - if curType == "hybrid" || curType == "layer3" { - _, _, err = i.client.DevicePorts.PortToLayerTwo(bond0ID) + if targetType == "layer2-bonded" { + for _, p := range bondPorts { + _, _, err := i.client.DevicePorts.PortToLayerTwo(d.ID, p.Name) if err != nil { - return nil, err + return err } + } + for _, p := range ethPorts { + _, _, err := i.client.DevicePorts.Bond(p, false) + if err != nil { + return err + } + } + } + return nil +} + +func (i *DevicePortServiceOp) Convert1BondDevice(d *Device, targetType string) error { + bond0, err := d.GetPortByName("bond0") + if err != nil { + return err + } + bond0ports := d.GetPortsInBond("bond0") + if targetType == "layer3" { + for _, p := range bond0ports { + _, _, err = i.client.DevicePorts.Bond(p, false) + if err != nil { + return err + } } - _, _, err = i.client.DevicePorts.Disbond( - &DisbondRequest{PortID: bond0ID, BulkDisable: true}) + _, _, err = i.client.DevicePorts.PortToLayerThree(d.ID, "bond0") if err != nil { - return nil, err + return err } } - if nType == "layer2-bonded" { - if curType == "hybrid" || curType == "layer3" { - _, _, err = i.client.DevicePorts.PortToLayerTwo(bond0ID) + if targetType == "hybrid" { + bond0, _, err = i.client.DevicePorts.Bond(bond0, false) + if err != nil { + return err + } + bond0, _, err = i.client.DevicePorts.PortToLayerThree(d.ID, "bond0") + if err != nil { + return err + } + eth1, err := i.client.DevicePorts.GetPortByName(d.ID, "eth1") + if err != nil { + return err + } + _, _, err = i.client.DevicePorts.Disbond(eth1, false) + if err != nil { + return err + } + } + if targetType == "layer2-individual" { + bond0, _, err = i.client.DevicePorts.PortToLayerTwo(d.ID, "bond0") + if err != nil { + return err + } + _, _, err = i.client.DevicePorts.Disbond(bond0, true) + if err != nil { + return err + } + } + if targetType == "layer2-bonded" { + for _, p := range bond0ports { + _, _, err = i.client.DevicePorts.Bond(p, false) if err != nil { - return nil, err + return err } } - _, _, err = i.client.DevicePorts.Bond( - &BondRequest{PortID: bond0ID, BulkEnable: false}) + bond0, _, err = i.client.DevicePorts.PortToLayerTwo(d.ID, "bond0") if err != nil { - return nil, err + return err } } + return nil +} - d, _, err = i.client.Devices.Get(deviceID, nil) +func (i *DevicePortServiceOp) DeviceToNetworkType(deviceID string, targetType string) (*Device, error) { + + d, _, err := i.client.Devices.Get(deviceID, nil) if err != nil { return nil, err } - if d.NetworkType != nType { - return nil, fmt.Errorf( - "Failed to convert 
device %s from %s to %s. New type was %s", - deviceID, curType, nType, d.NetworkType) + curType, err := d.GetNetworkType() + if err != nil { + return nil, err + } + if curType == targetType { + return nil, fmt.Errorf("Device already is in state %s", targetType) } - return d, err -} -func (i *DevicePortServiceOp) DeviceToLayerThree(deviceID string) (*Device, error) { - // hopefull all the VLANs are unassigned at this point - bond0, err := i.client.DevicePorts.GetBondPort(deviceID) - if err != nil { - return nil, err + numOfBonds := d.NumOfBonds() + if (numOfBonds < 1) || (numOfBonds > 2) { + return nil, fmt.Errorf("Strange number of bonds: %d", numOfBonds) } - bond0, _, err = i.client.DevicePorts.PortToLayerThree(bond0.ID) + if numOfBonds == 1 { + err = i.client.DevicePorts.Convert1BondDevice(d, targetType) + } else { + err = i.client.DevicePorts.Convert2BondDevice(d, targetType) + } if err != nil { return nil, err } - d, _, err := i.client.Devices.Get(deviceID, nil) - return d, err -} -// DeviceToLayerTwo converts device to L2 networking. Use bond0 to attach VLAN. -func (i *DevicePortServiceOp) DeviceToLayerTwo(deviceID string) (*Device, error) { - bond0, err := i.client.DevicePorts.GetBondPort(deviceID) + d, _, err = i.client.Devices.Get(deviceID, nil) if err != nil { return nil, err } - bond0, _, err = i.client.DevicePorts.PortToLayerTwo(bond0.ID) + finalType, err := d.GetNetworkType() if err != nil { return nil, err } - d, _, err := i.client.Devices.Get(deviceID, nil) - return d, err + if finalType != targetType { + return nil, fmt.Errorf( + "Failed to convert device %s from %s to %s. New type was %s", + deviceID, curType, targetType, finalType) + + } + return d, err } diff --git a/vendor/github.com/packethost/packngo/sshkeys.go b/vendor/github.com/packethost/packngo/sshkeys.go index 4b198f571a4..1427d6c3cc9 100644 --- a/vendor/github.com/packethost/packngo/sshkeys.go +++ b/vendor/github.com/packethost/packngo/sshkeys.go @@ -28,7 +28,7 @@ type SSHKey struct { FingerPrint string `json:"fingerprint"` Created string `json:"created_at"` Updated string `json:"updated_at"` - User User `json:"user,omitempty"` + Owner Href URL string `json:"href,omitempty"` } diff --git a/vendor/github.com/packethost/packngo/volumes.go b/vendor/github.com/packethost/packngo/volumes.go index ebaaddf1c23..9d9bc3f379b 100644 --- a/vendor/github.com/packethost/packngo/volumes.go +++ b/vendor/github.com/packethost/packngo/volumes.go @@ -222,17 +222,17 @@ func (v *VolumeAttachmentServiceOp) Delete(attachmentID string) (*Response, erro } // Lock sets a volume to "locked" -func (s *VolumeServiceOp) Lock(id string) (*Response, error) { +func (v *VolumeServiceOp) Lock(id string) (*Response, error) { path := fmt.Sprintf("%s/%s", volumeBasePath, id) action := lockType{Locked: true} - return s.client.DoRequest("PATCH", path, action, nil) + return v.client.DoRequest("PATCH", path, action, nil) } // Unlock sets a volume to "unlocked" -func (s *VolumeServiceOp) Unlock(id string) (*Response, error) { +func (v *VolumeServiceOp) Unlock(id string) (*Response, error) { path := fmt.Sprintf("%s/%s", volumeBasePath, id) action := lockType{Locked: false} - return s.client.DoRequest("PATCH", path, action, nil) + return v.client.DoRequest("PATCH", path, action, nil) } diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/config.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/config.go index 3adc559ace3..dbb7e8dd54b 100644 --- 
a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/config.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/config.go @@ -1,8 +1,15 @@ package packet import ( - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/terraform/helper/logging" + "context" + "crypto/x509" + "net/http" + "net/url" + "regexp" + "time" + + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/terraform-plugin-sdk/helper/logging" "github.com/packethost/packngo" ) @@ -14,9 +21,42 @@ type Config struct { AuthToken string } +var redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`) + +func PacketRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + if ctx.Err() != nil { + return false, ctx.Err() + } + + if err != nil { + if v, ok := err.(*url.Error); ok { + // Don't retry if the error was due to too many redirects. + if redirectsErrorRe.MatchString(v.Error()) { + return false, nil + } + + // Don't retry if the error was due to TLS cert verification failure. + if _, ok := v.Err.(x509.UnknownAuthorityError); ok { + return false, nil + } + } + + // The error is likely recoverable so retry. + return true, nil + } + return false, nil +} + // Client returns a new client for accessing Packet's API. func (c *Config) Client() *packngo.Client { - client := cleanhttp.DefaultClient() - client.Transport = logging.NewTransport("Packet", client.Transport) - return packngo.NewClientWithAuth(consumerToken, c.AuthToken, client) + httpClient := retryablehttp.NewClient() + httpClient.RetryWaitMin = time.Second + httpClient.RetryWaitMax = 30 * time.Second + httpClient.RetryMax = 10 + httpClient.CheckRetry = PacketRetryPolicy + httpClient.HTTPClient.Transport = logging.NewTransport( + "Packet", + httpClient.HTTPClient.Transport) + + return packngo.NewClientWithAuth(consumerToken, c.AuthToken, httpClient) } diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_device.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_device.go new file mode 100644 index 00000000000..b86c4e68f52 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_device.go @@ -0,0 +1,286 @@ +package packet + +import ( + "encoding/json" + "fmt" + "path" + "path/filepath" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/packethost/packngo" +) + +func dataSourcePacketDevice() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketDeviceRead, + Schema: map[string]*schema.Schema{ + "hostname": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"device_id"}, + }, + "project_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"device_id"}, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "device_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"project_id", "hostname"}, + }, + "facility": { + Type: schema.TypeString, + Computed: true, + }, + "plan": { + Type: schema.TypeString, + Computed: true, + }, + "operating_system": { + Type: schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "billing_cycle": { + Type: schema.TypeString, + Computed: true, + }, + "access_public_ipv6": { + Type: schema.TypeString, 
+ Computed: true, + }, + + "access_public_ipv4": { + Type: schema.TypeString, + Computed: true, + }, + "access_private_ipv4": { + Type: schema.TypeString, + Computed: true, + }, + "tags": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "ssh_key_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "network_type": { + Type: schema.TypeString, + Computed: true, + }, + "hardware_reservation_id": { + Type: schema.TypeString, + Computed: true, + }, + "storage": { + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + s, _ := structure.NormalizeJsonString(v) + return s + }, + Computed: true, + }, + "root_password": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "always_pxe": { + Type: schema.TypeBool, + Computed: true, + }, + "ipxe_script_url": { + Type: schema.TypeString, + Computed: true, + }, + "network": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + }, + "gateway": { + Type: schema.TypeString, + Computed: true, + }, + "family": { + Type: schema.TypeInt, + Computed: true, + }, + "cidr": { + Type: schema.TypeInt, + Computed: true, + }, + "public": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "ports": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "mac": { + Type: schema.TypeString, + Computed: true, + }, + "bonded": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + hostnameRaw, hostnameOK := d.GetOk("hostname") + projectIdRaw, projectIdOK := d.GetOk("project_id") + deviceIdRaw, deviceIdOK := d.GetOk("device_id") + + if !deviceIdOK && !hostnameOK { + return fmt.Errorf("You must supply device_id or hostname") + } + var device *packngo.Device + if hostnameOK { + if !projectIdOK { + return fmt.Errorf("If you lookup via hostname, you must supply project_id") + } + hostname := hostnameRaw.(string) + projectId := projectIdRaw.(string) + + ds, _, err := client.Devices.List(projectId, nil) + if err != nil { + return err + } + + device, err = findDeviceByHostname(ds, hostname) + if err != nil { + return err + } + } else { + deviceId := deviceIdRaw.(string) + var err error + device, _, err = client.Devices.Get(deviceId, nil) + if err != nil { + return err + } + } + + d.Set("hostname", device.Hostname) + d.Set("project_id", device.Project.ID) + d.Set("device_id", device.ID) + d.Set("plan", device.Plan.Slug) + d.Set("facility", device.Facility.Code) + d.Set("operating_system", device.OS.Slug) + d.Set("state", device.State) + d.Set("billing_cycle", device.BillingCycle) + d.Set("ipxe_script_url", device.IPXEScriptURL) + d.Set("always_pxe", device.AlwaysPXE) + d.Set("root_password", device.RootPassword) + if device.Storage != nil { + rawStorageBytes, err := json.Marshal(device.Storage) + if err != nil { + return fmt.Errorf("[ERR] Error getting storage JSON string for device (%s): %s", d.Id(), err) + } + + storageString, err := structure.NormalizeJsonString(string(rawStorageBytes)) + if err != nil { + return fmt.Errorf("[ERR] 
Errori normalizing storage JSON string for device (%s): %s", d.Id(), err) + } + d.Set("storage", storageString) + } + + if len(device.HardwareReservation.Href) > 0 { + d.Set("hardware_reservation_id", path.Base(device.HardwareReservation.Href)) + } + networkType, err := device.GetNetworkType() + if err != nil { + return err + } + + d.Set("network_type", networkType) + + d.Set("tags", device.Tags) + + keyIDs := []string{} + for _, k := range device.SSHKeys { + keyIDs = append(keyIDs, filepath.Base(k.URL)) + } + d.Set("ssh_key_ids", keyIDs) + networkInfo := getNetworkInfo(device.Network) + + sort.SliceStable(networkInfo.Networks, func(i, j int) bool { + famI := networkInfo.Networks[i]["family"].(int) + famJ := networkInfo.Networks[j]["family"].(int) + pubI := networkInfo.Networks[i]["public"].(bool) + pubJ := networkInfo.Networks[j]["public"].(bool) + return getNetworkRank(famI, pubI) < getNetworkRank(famJ, pubJ) + }) + + d.Set("network", networkInfo.Networks) + d.Set("access_public_ipv4", networkInfo.PublicIPv4) + d.Set("access_private_ipv4", networkInfo.PrivateIPv4) + d.Set("access_public_ipv6", networkInfo.PublicIPv6) + + ports := getPorts(device.NetworkPorts) + d.Set("ports", ports) + + d.SetId(device.ID) + return nil +} + +func findDeviceByHostname(devices []packngo.Device, hostname string) (*packngo.Device, error) { + results := make([]packngo.Device, 0) + for _, d := range devices { + if d.Hostname == hostname { + results = append(results, d) + } + } + if len(results) == 1 { + return &results[0], nil + } + if len(results) == 0 { + return nil, fmt.Errorf("no device found with hostname %s", hostname) + } + return nil, fmt.Errorf("too many devices found with hostname %s (found %d, expected 1)", hostname, len(results)) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_device_bgp_neighbors.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_device_bgp_neighbors.go new file mode 100644 index 00000000000..f6707803752 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_device_bgp_neighbors.go @@ -0,0 +1,132 @@ +package packet + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func bgpNeighborSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address_family": { + Type: schema.TypeInt, + Computed: true, + }, + "customer_as": { + Type: schema.TypeInt, + Computed: true, + }, + "customer_ip": { + Type: schema.TypeString, + Computed: true, + }, + "md5_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "md5_password": { + Type: schema.TypeString, + Computed: true, + }, + "multihop": { + Type: schema.TypeBool, + Computed: true, + }, + "peer_as": { + Type: schema.TypeInt, + Computed: true, + }, + "peer_ips": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "routes_in": { + Type: schema.TypeList, + Computed: true, + Elem: bgpRouteSchema(), + }, + "routes_out": { + Type: schema.TypeList, + Computed: true, + Elem: bgpRouteSchema(), + }, + }, + } +} + +func bgpRouteSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "route": { + Type: schema.TypeString, + Computed: true, + }, + "exact": { + Type: schema.TypeBool, + Computed: true, + }, + }, + } +} + +func dataSourcePacketDeviceBGPNeighbors() *schema.Resource { + return &schema.Resource{ + Read: 
dataSourcePacketDeviceBGPNeighborsRead, + Schema: map[string]*schema.Schema{ + "device_id": { + Type: schema.TypeString, + Required: true, + }, + "bgp_neighbors": { + Type: schema.TypeList, + Computed: true, + Elem: bgpNeighborSchema(), + }, + }, + } +} + +func dataSourcePacketDeviceBGPNeighborsRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + deviceID := d.Get("device_id").(string) + + bgpNeighborsRaw, _, err := client.Devices.ListBGPNeighbors(deviceID, nil) + if err != nil { + return err + } + + d.Set("bgp_neighbors", getBgpNeighbors(bgpNeighborsRaw)) + d.SetId(deviceID) + return nil +} + +func getRoutesSlice(routes []packngo.BGPRoute) []map[string]interface{} { + ret := []map[string]interface{}{} + for _, r := range routes { + ret = append(ret, map[string]interface{}{ + "route": r.Route, "exact": r.Exact, + }) + } + return ret +} + +func getBgpNeighbors(ns []packngo.BGPNeighbor) []map[string]interface{} { + ret := make([]map[string]interface{}, 0, 1) + for _, n := range ns { + neighbor := map[string]interface{}{ + "address_family": n.AddressFamily, + "customer_as": n.CustomerAs, + "customer_ip": n.CustomerIP, + "md5_enabled": n.Md5Enabled, + "md5_password": n.Md5Password, + "multihop": n.Multihop, + "peer_as": n.PeerAs, + "peer_ips": n.PeerIps, + "routes_in": getRoutesSlice(n.RoutesIn), + "routes_out": getRoutesSlice(n.RoutesOut), + } + ret = append(ret, neighbor) + } + return ret +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_ip_block_ranges.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_ip_block_ranges.go new file mode 100644 index 00000000000..d2f7cf818de --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_ip_block_ranges.go @@ -0,0 +1,104 @@ +package packet + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func dataSourcePacketIPBlockRanges() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketIPBlockRangesRead, + + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + }, + "facility": { + Type: schema.TypeString, + Optional: true, + }, + "public_ipv4": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + "global_ipv4": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + "private_ipv4": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + "ipv6": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + }, + } +} + +func faclityMatch(ref, ipFacility string) bool { + if ref == "" { + return true + } + if ref == ipFacility { + return true + } + return false +} + +func dataSourcePacketIPBlockRangesRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + projectID := d.Get("project_id").(string) + ips, _, err := client.ProjectIPs.List(projectID) + if err != nil { + return err + } + + facility := d.Get("facility").(string) + + publicIPv4s := []string{} + globalIPv4s := []string{} + privateIPv4s := []string{} + theIPv6s := []string{} + var targetSlice *[]string + + for _, ip := range ips { + targetSlice = nil + cnStr := fmt.Sprintf("%s/%d", ip.Network, ip.CIDR) + if ip.AddressFamily == 4 { + if ip.Public { + if getGlobalBool(&ip) { + 
globalIPv4s = append(globalIPv4s, cnStr) + } else { + targetSlice = &publicIPv4s + } + } else { + targetSlice = &privateIPv4s + } + } else { + targetSlice = &theIPv6s + } + if targetSlice != nil && faclityMatch(facility, ip.Facility.Code) { + *targetSlice = append(*targetSlice, cnStr) + } + } + + d.Set("public_ipv4", publicIPv4s) + d.Set("global_ipv4", globalIPv4s) + d.Set("private_ipv4", privateIPv4s) + d.Set("ipv6", theIPv6s) + if facility != "" { + facility = "-" + facility + } + d.SetId(projectID + facility + "-IPs") + return nil + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_operating_system.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_operating_system.go index 8221da1e819..f6b5351fd01 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_operating_system.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_operating_system.go @@ -5,7 +5,7 @@ import ( "log" "strings" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/packethost/packngo" ) @@ -58,73 +58,60 @@ func dataSourcePacketOperatingSystemRead(d *schema.ResourceData, meta interface{ return err } - final := []packngo.OS{} - temp := []packngo.OS{} - if nameOK { + temp := []packngo.OS{} for _, os := range oss { if strings.Contains(strings.ToLower(os.Name), strings.ToLower(name.(string))) { temp = append(temp, os) } - final = temp } + oss = temp } - if distroOK { - temp = []packngo.OS{} - if len(temp) == 0 { - final = oss - } - for _, v := range final { + if distroOK && (len(oss) != 0) { + temp := []packngo.OS{} + for _, v := range oss { if v.Distro == distro.(string) { temp = append(temp, v) } } - final = temp + oss = temp } - if versionOK { - temp = []packngo.OS{} - if len(final) == 0 { - final = oss - } - for _, v := range final { + if versionOK && (len(oss) != 0) { + temp := []packngo.OS{} + for _, v := range oss { if v.Version == version.(string) { temp = append(temp, v) } } - final = temp + oss = temp } - if provisionableOnOK { - temp = []packngo.OS{} - if len(final) == 0 { - final = oss - } - for _, v := range final { + if provisionableOnOK && (len(oss) != 0) { + temp := []packngo.OS{} + for _, v := range oss { for _, po := range v.ProvisionableOn { if po == provisionableOn.(string) { temp = append(temp, v) } } } - final = temp + oss = temp } - log.Println("[DEBUG] RESULTS:", final) + log.Println("[DEBUG] RESULTS:", oss) - if len(final) == 0 { + if len(oss) == 0 { return fmt.Errorf("There are no operating systems that match the search criteria") } - if len(final) > 1 { + if len(oss) > 1 { return fmt.Errorf("There is more than one operating system that matches the search criteria") } - for _, v := range final { - d.Set("name", v.Name) - d.Set("distro", v.Distro) - d.Set("version", v.Version) - d.Set("slug", v.Slug) - d.SetId(v.Slug) - } + d.Set("name", oss[0].Name) + d.Set("distro", oss[0].Distro) + d.Set("version", oss[0].Version) + d.Set("slug", oss[0].Slug) + d.SetId(oss[0].Slug) return nil } diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_organization.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_organization.go new file mode 100644 index 00000000000..7a960904528 --- /dev/null +++ 
b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_organization.go @@ -0,0 +1,119 @@ +package packet + +import ( + "fmt" + "log" + "path/filepath" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func dataSourcePacketOrganization() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketOrganizationRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"organization_id"}, + }, + "organization_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"name"}, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + + "website": { + Type: schema.TypeString, + Computed: true, + }, + + "twitter": { + Type: schema.TypeString, + Computed: true, + }, + "logo": { + Type: schema.TypeString, + Computed: true, + }, + "project_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func findOrgByName(os []packngo.Organization, name string) (*packngo.Organization, error) { + results := make([]packngo.Organization, 0) + for _, o := range os { + if o.Name == name { + results = append(results, o) + } + } + if len(results) == 1 { + return &results[0], nil + } + if len(results) == 0 { + return nil, fmt.Errorf("no organization found with name %s", name) + } + return nil, fmt.Errorf("too many organizations found with name %s (found %d, expected 1)", name, len(results)) +} + +func dataSourcePacketOrganizationRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + nameRaw, nameOK := d.GetOk("name") + orgIdRaw, orgIdOK := d.GetOk("organization_id") + + if !orgIdOK && !nameOK { + return fmt.Errorf("You must supply organization_id or name") + } + var org *packngo.Organization + + if nameOK { + name := nameRaw.(string) + + os, _, err := client.Organizations.List(nil) + if err != nil { + return err + } + + org, err = findOrgByName(os, name) + if err != nil { + return err + } + } else { + orgId := orgIdRaw.(string) + log.Println(orgId) + var err error + org, _, err = client.Organizations.Get(orgId, nil) + if err != nil { + return err + } + } + projectIds := []string{} + + for _, p := range org.Projects { + projectIds = append(projectIds, filepath.Base(p.URL)) + } + + d.Set("organization_id", org.ID) + d.Set("name", org.Name) + d.Set("description", org.Description) + d.Set("website", org.Website) + d.Set("twitter", org.Twitter) + d.Set("logo", org.Logo) + d.Set("project_ids", projectIds) + d.SetId(org.ID) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_precreated_ip_block.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_precreated_ip_block.go index fd7ec49109d..127f93bed26 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_precreated_ip_block.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_precreated_ip_block.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/packethost/packngo" ) diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_project.go 
b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_project.go new file mode 100644 index 00000000000..930ce0b0c18 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_project.go @@ -0,0 +1,168 @@ +package packet + +import ( + "fmt" + "log" + "path" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/packethost/packngo" +) + +func dataSourcePacketProject() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketProjectRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"project_id"}, + }, + "project_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"name"}, + }, + + "created": { + Type: schema.TypeString, + Computed: true, + }, + + "updated": { + Type: schema.TypeString, + Computed: true, + }, + + "backend_transfer": { + Type: schema.TypeBool, + Computed: true, + }, + + "payment_method_id": { + Type: schema.TypeString, + Computed: true, + }, + + "organization_id": { + Type: schema.TypeString, + Computed: true, + }, + "user_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "bgp_config": { + Type: schema.TypeList, + MaxItems: 1, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deployment_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"local", "global"}, false), + }, + "asn": { + Type: schema.TypeInt, + Required: true, + }, + "md5": { + Type: schema.TypeString, + Optional: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "max_prefix": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourcePacketProjectRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + nameRaw, nameOK := d.GetOk("name") + projectIdRaw, projectIdOK := d.GetOk("project_id") + + if !projectIdOK && !nameOK { + return fmt.Errorf("You must supply project_id or name") + } + var project *packngo.Project + + if nameOK { + name := nameRaw.(string) + + os, _, err := client.Projects.List(nil) + if err != nil { + return err + } + + project, err = findProjectByName(os, name) + if err != nil { + return err + } + } else { + projectId := projectIdRaw.(string) + log.Println(projectId) + var err error + project, _, err = client.Projects.Get(projectId, nil) + if err != nil { + return err + } + } + + d.SetId(project.ID) + d.Set("payment_method_id", path.Base(project.PaymentMethod.URL)) + d.Set("name", project.Name) + d.Set("project_id", project.ID) + d.Set("organization_id", path.Base(project.Organization.URL)) + d.Set("created", project.Created) + d.Set("updated", project.Updated) + d.Set("backend_transfer", project.BackendTransfer) + + bgpConf, _, err := client.BGPConfig.Get(project.ID, nil) + userIds := []string{} + for _, u := range project.Users { + userIds = append(userIds, path.Base(u.URL)) + } + d.Set("user_ids", userIds) + + if (err == nil) && (bgpConf != nil) { + // guard against an empty struct + if bgpConf.ID != "" { + err := d.Set("bgp_config", flattenBGPConfig(bgpConf)) + if err != nil { + err = friendlyError(err) + return err + } + } + } + return nil +} + +func findProjectByName(ps []packngo.Project, name string) (*packngo.Project, error) { + results 
:= make([]packngo.Project, 0) + for _, p := range ps { + if p.Name == name { + results = append(results, p) + } + } + if len(results) == 1 { + return &results[0], nil + } + if len(results) == 0 { + return nil, fmt.Errorf("no project found with name %s", name) + } + return nil, fmt.Errorf("too many projects found with name %s (found %d, expected 1)", name, len(results)) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_spot_market_price.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_spot_market_price.go index 741a0106059..df3b8627213 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_spot_market_price.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_spot_market_price.go @@ -3,7 +3,7 @@ package packet import ( "fmt" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/packethost/packngo" ) diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_spot_market_request.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_spot_market_request.go new file mode 100644 index 00000000000..da239e48b98 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_spot_market_request.go @@ -0,0 +1,49 @@ +package packet + +import ( + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func dataSourcePacketSpotMarketRequest() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketSpotMarketRequestRead, + + Schema: map[string]*schema.Schema{ + "request_id": { + Type: schema.TypeString, + Required: true, + }, + "device_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + Timeouts: resourceDefaultTimeouts, + } +} +func dataSourcePacketSpotMarketRequestRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + id := d.Get("request_id").(string) + + smr, _, err := client.SpotMarketRequests.Get(id, &packngo.GetOptions{Includes: []string{"project", "devices", "facilities"}}) + if err != nil { + err = friendlyError(err) + if isNotFound(err) { + d.SetId("") + return nil + } + return err + } + + deviceIDs := make([]string, len(smr.Devices)) + for i, d := range smr.Devices { + deviceIDs[i] = d.ID + } + d.Set("device_ids", deviceIDs) + d.SetId(id + strings.Join(deviceIDs, "-")) + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_volume.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_volume.go new file mode 100644 index 00000000000..e8f23d0d05e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/datasource_packet_volume.go @@ -0,0 +1,188 @@ +package packet + +import ( + "fmt" + "path" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/packethost/packngo" +) + +func dataSourcePacketVolume() *schema.Resource { + return &schema.Resource{ + Read: dataSourcePacketVolumeRead, + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"volume_id"}, + }, + "project_id": { + Type: schema.TypeString, + Optional: true, 
+ Computed: true, + ConflictsWith: []string{"volume_id"}, + }, + "volume_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"project_id", "name"}, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "size": { + Type: schema.TypeInt, + Computed: true, + }, + + "facility": { + Type: schema.TypeString, + Computed: true, + }, + + "plan": { + Type: schema.TypeString, + Computed: true, + }, + + "billing_cycle": { + Type: schema.TypeString, + Computed: true, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + }, + + "locked": { + Type: schema.TypeBool, + Computed: true, + }, + + "snapshot_policies": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "snapshot_frequency": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_count": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + + "device_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "created": { + Type: schema.TypeString, + Computed: true, + }, + + "updated": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourcePacketVolumeRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + nameRaw, nameOK := d.GetOk("name") + projectIdRaw, projectIdOK := d.GetOk("project_id") + volumeIdRaw, volumeIdOK := d.GetOk("volume_id") + + if !volumeIdOK && !nameOK { + return fmt.Errorf("You must supply volume_id or name") + } + var volume *packngo.Volume + if nameOK { + if !projectIdOK { + return fmt.Errorf("If you lookup via name, you must supply project_id") + } + name := nameRaw.(string) + projectId := projectIdRaw.(string) + + vs, _, err := client.Volumes.List(projectId, &packngo.ListOptions{Includes: []string{"attachments.device"}}) + if err != nil { + return err + } + + volume, err = findVolumeByName(vs, name) + if err != nil { + return err + } + } else { + volumeId := volumeIdRaw.(string) + var err error + volume, _, err = client.Volumes.Get(volumeId, &packngo.GetOptions{Includes: []string{"attachments.device"}}) + if err != nil { + return err + } + } + + d.Set("name", volume.Name) + d.Set("description", volume.Description) + d.Set("size", volume.Size) + d.Set("plan", volume.Plan.Slug) + d.Set("facility", volume.Facility.Code) + d.Set("state", volume.State) + d.Set("billing_cycle", volume.BillingCycle) + d.Set("locked", volume.Locked) + d.Set("created", volume.Created) + d.Set("updated", volume.Updated) + d.Set("project_id", volume.Project.ID) + + snapshot_policies := make([]map[string]interface{}, 0, len(volume.SnapshotPolicies)) + for _, snapshot_policy := range volume.SnapshotPolicies { + policy := map[string]interface{}{ + "snapshot_frequency": snapshot_policy.SnapshotFrequency, + "snapshot_count": snapshot_policy.SnapshotCount, + } + snapshot_policies = append(snapshot_policies, policy) + } + d.Set("snapshot_policies", snapshot_policies) + + deviceIds := []string{} + + for _, a := range volume.Attachments { + deviceIds = append(deviceIds, path.Base(a.Device.Href)) + } + + d.Set("device_ids", deviceIds) + d.SetId(volume.ID) + + return nil +} + +func findVolumeByName(volumes []packngo.Volume, name string) (*packngo.Volume, error) { + results := make([]packngo.Volume, 0) + for _, v := range volumes { + if v.Name == name { + results = append(results, v) + } + } + if len(results) == 1 { + return &results[0], nil + } + if len(results) == 0 { + return nil, 
fmt.Errorf("no volume found with name %s", name) + } + return nil, fmt.Errorf("too many volumes found with hostname %s (found %d, expected 1)", name, len(results)) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/errors.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/errors.go index 9b945347ac7..1cca69cce40 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/errors.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/errors.go @@ -9,16 +9,28 @@ import ( func friendlyError(err error) error { if e, ok := err.(*packngo.ErrorResponse); ok { + resp := e.Response errors := Errors(e.Errors) // if packngo gives us blank error strings, populate them with something useful // this is useful so the user gets some sort of indication of a failure rather than a blank message if 0 == len(errors) { errors = Errors{e.SingleError} } - return &ErrorResponse{ - StatusCode: e.Response.StatusCode, + er := &ErrorResponse{ + StatusCode: resp.StatusCode, Errors: errors, } + respHead := resp.Header + + // this checks if the error comes from API (and not from cache/LB) + if len(errors) > 0 { + ct := respHead.Get("Content-Type") + xrid := respHead.Get("X-Request-Id") + if strings.Contains(ct, "application/json") && len(xrid) > 0 { + er.IsAPIError = true + } + } + return er } return err } @@ -32,7 +44,7 @@ func isForbidden(err error) bool { func isNotFound(err error) bool { if r, ok := err.(*ErrorResponse); ok { - return r.StatusCode == http.StatusNotFound + return r.StatusCode == http.StatusNotFound && r.IsAPIError } return false } @@ -46,4 +58,5 @@ func (e Errors) Error() string { type ErrorResponse struct { StatusCode int Errors + IsAPIError bool } diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/helpers_device.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/helpers_device.go new file mode 100644 index 00000000000..a2d183ab67c --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/helpers_device.go @@ -0,0 +1,247 @@ +package packet + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/packethost/packngo" +) + +var wgMap = map[string]*sync.WaitGroup{} +var wgMutex = sync.Mutex{} + +func ifToIPCreateRequest(m interface{}) packngo.IPAddressCreateRequest { + iacr := packngo.IPAddressCreateRequest{} + ia := m.(map[string]interface{}) + at := ia["type"].(string) + switch at { + case "public_ipv4": + iacr.AddressFamily = 4 + iacr.Public = true + case "private_ipv4": + iacr.AddressFamily = 4 + iacr.Public = false + case "public_ipv6": + iacr.AddressFamily = 6 + iacr.Public = true + } + iacr.CIDR = ia["cidr"].(int) + iacr.Reservations = convertStringArr(ia["reservation_ids"].([]interface{})) + return iacr +} + +func getNewIPAddressSlice(arr []interface{}) []packngo.IPAddressCreateRequest { + addressTypesSlice := make([]packngo.IPAddressCreateRequest, len(arr)) + + for i, m := range arr { + addressTypesSlice[i] = ifToIPCreateRequest(m) + } + return addressTypesSlice +} + +type NetworkInfo struct { + Networks []map[string]interface{} + IPv4SubnetSize int + Host string + PublicIPv4 string + PublicIPv6 string + PrivateIPv4 string +} + +func getNetworkInfo(ips []*packngo.IPAddressAssignment) NetworkInfo { + ni := 
NetworkInfo{Networks: make([]map[string]interface{}, 0, 1)} + for _, ip := range ips { + network := map[string]interface{}{ + "address": ip.Address, + "gateway": ip.Gateway, + "family": ip.AddressFamily, + "cidr": ip.CIDR, + "public": ip.Public, + } + ni.Networks = append(ni.Networks, network) + + // Initial device IPs are fixed and marked as "Management" + if ip.Management { + if ip.AddressFamily == 4 { + if ip.Public { + ni.Host = ip.Address + ni.IPv4SubnetSize = ip.CIDR + ni.PublicIPv4 = ip.Address + } else { + ni.PrivateIPv4 = ip.Address + } + } else { + ni.PublicIPv6 = ip.Address + } + } + } + return ni +} + +func getNetworkRank(family int, public bool) int { + switch { + case family == 4 && public: + return 0 + case family == 6: + return 1 + case family == 4 && public: + return 2 + } + return 3 +} + +func getPorts(ps []packngo.Port) []map[string]interface{} { + ret := make([]map[string]interface{}, 0, 1) + for _, p := range ps { + port := map[string]interface{}{ + "name": p.Name, + "id": p.ID, + "type": p.Type, + "mac": p.Data.MAC, + "bonded": p.Data.Bonded, + } + ret = append(ret, port) + } + return ret +} + +func waitUntilReservationProvisionable(id string, meta interface{}) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"false"}, + Target: []string{"true"}, + Refresh: func() (interface{}, string, error) { + client := meta.(*packngo.Client) + r, _, err := client.HardwareReservations.Get(id, nil) + if err != nil { + return 42, "error", friendlyError(err) + } + provisionableString := "false" + if r.Provisionable { + provisionableString = "true" + } + return 42, provisionableString, nil + }, + Timeout: 60 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + _, err := stateConf.WaitForState() + return err +} + +func getWaitForDeviceLock(deviceID string) *sync.WaitGroup { + wgMutex.Lock() + defer wgMutex.Unlock() + wg, ok := wgMap[deviceID] + if !ok { + wg = &sync.WaitGroup{} + wgMap[deviceID] = wg + } + return wg +} + +func waitForDeviceAttribute(d *schema.ResourceData, targets []string, pending []string, attribute string, meta interface{}) (string, error) { + + wg := getWaitForDeviceLock(d.Id()) + wg.Wait() + + wgMutex.Lock() + wg.Add(1) + wgMutex.Unlock() + + defer func() { + wgMutex.Lock() + wg.Done() + wgMutex.Unlock() + }() + + if attribute != "state" && attribute != "network_type" { + return "", fmt.Errorf("unsupported attr to wait for: %s", attribute) + } + + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: targets, + Refresh: func() (interface{}, string, error) { + client := meta.(*packngo.Client) + device, _, err := client.Devices.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project"}}) + if err == nil { + retAttrVal := device.State + if attribute == "network_type" { + networkType, nterr := device.GetNetworkType() + if nterr != nil { + return "error", "error", nterr + } + retAttrVal = networkType + } + return retAttrVal, retAttrVal, nil + } + return "error", "error", err + }, + Timeout: 60 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + attrval, err := stateConf.WaitForState() + + return attrval.(string), err +} + +// powerOnAndWait Powers on the device and waits for it to be active. 
+func powerOnAndWait(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + _, err := client.Devices.PowerOn(d.Id()) + if err != nil { + return friendlyError(err) + } + + _, err = waitForDeviceAttribute(d, []string{"active", "failed"}, []string{"off"}, "state", client) + if err != nil { + return err + } + state := d.Get("state").(string) + if state != "active" { + return friendlyError(fmt.Errorf("Device in non-active state \"%s\"", state)) + } + return nil +} + +func validateFacilityForDevice(v interface{}, k string) (ws []string, errors []error) { + if v.(string) == "any" { + errors = append(errors, fmt.Errorf(`Cannot use facility: "any"`)) + } + return +} + +func ipAddressSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(ipAddressTypes, false), + Description: fmt.Sprintf("one of %s", strings.Join(ipAddressTypes, ",")), + }, + "cidr": { + Type: schema.TypeInt, + Optional: true, + Description: "CIDR suffix for IP block assigned to this device", + }, + "reservation_ids": { + Type: schema.TypeList, + Optional: true, + Description: "IDs of reservations to pick the blocks from", + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringMatch(uuidRE, "must be a valid UUID"), + }, + }, + }, + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/provider.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/provider.go index e88cd5a70f4..69c5e9d35cf 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/provider.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/provider.go @@ -3,12 +3,15 @@ package packet import ( "time" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/helper/mutexkv" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -// Provider returns a schema.Provider for managing Packet infrastructure. 
+var packetMutexKV = mutexkv.NewMutexKV() + func Provider() terraform.ResourceProvider { + return &schema.Provider{ Schema: map[string]*schema.Schema{ "auth_token": { @@ -19,9 +22,16 @@ func Provider() terraform.ResourceProvider { }, }, DataSourcesMap: map[string]*schema.Resource{ - "packet_precreated_ip_block": dataSourcePacketPreCreatedIPBlock(), - "packet_operating_system": dataSourceOperatingSystem(), - "packet_spot_market_price": dataSourceSpotMarketPrice(), + "packet_ip_block_ranges": dataSourcePacketIPBlockRanges(), + "packet_precreated_ip_block": dataSourcePacketPreCreatedIPBlock(), + "packet_operating_system": dataSourceOperatingSystem(), + "packet_organization": dataSourcePacketOrganization(), + "packet_spot_market_price": dataSourceSpotMarketPrice(), + "packet_device": dataSourcePacketDevice(), + "packet_device_bgp_neighbors": dataSourcePacketDeviceBGPNeighbors(), + "packet_project": dataSourcePacketProject(), + "packet_spot_market_request": dataSourcePacketSpotMarketRequest(), + "packet_volume": dataSourcePacketVolume(), }, ResourcesMap: map[string]*schema.Resource{ @@ -38,7 +48,6 @@ func Provider() terraform.ResourceProvider { "packet_vlan": resourcePacketVlan(), "packet_bgp_session": resourcePacketBGPSession(), "packet_port_vlan_attachment": resourcePacketPortVlanAttachment(), - "packet_connect": resourcePacketConnect(), }, ConfigureFunc: providerConfigure, diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_bgp_session.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_bgp_session.go index d548e3986b2..6a8f61ce429 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_bgp_session.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_bgp_session.go @@ -3,8 +3,8 @@ package packet import ( "log" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/packethost/packngo" ) @@ -52,7 +52,7 @@ func resourcePacketBGPSessionCreate(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] creating %s BGP session to device (%s)\n", addressFamily, dID) bgpSession, _, err := client.BGPSessions.Create( dID, packngo.CreateBGPSessionRequest{ - AddressFamily: "ipv4", + AddressFamily: addressFamily, DefaultRoute: &defaultRoute}) if err != nil { return friendlyError(err) diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_connect.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_connect.go deleted file mode 100644 index ccb0b7cc08c..00000000000 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_connect.go +++ /dev/null @@ -1,157 +0,0 @@ -package packet - -import ( - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/packethost/packngo" -) - -func resourcePacketConnect() *schema.Resource { - return &schema.Resource{ - Create: resourcePacketConnectCreate, - Read: resourcePacketConnectRead, - Delete: resourcePacketConnectDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "project_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - 
"provider_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "facility": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "port_speed": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "provider_payload": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Sensitive: true, - }, - "vxlan": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func waitForConnectStatus(d *schema.ResourceData, target string, pending string, meta interface{}) (interface{}, error) { - stateConf := &resource.StateChangeConf{ - Pending: []string{pending}, - Target: []string{target}, - Refresh: connectRefreshFunc(d, meta), - Timeout: 60 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - return stateConf.WaitForState() -} - -func connectRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - client := meta.(*packngo.Client) - - return func() (interface{}, string, error) { - if err := resourcePacketConnectRead(d, meta); err != nil { - return nil, "", err - } - - if status, ok := d.GetOk("status"); ok { - projectId := d.Get("project_id").(string) - c, _, err := client.Connects.Get(d.Id(), projectId, nil) - if err != nil { - return nil, "", friendlyError(err) - } - return c, status.(string), nil - } - - return nil, "", nil - } -} - -func resourcePacketConnectCreate(d *schema.ResourceData, meta interface{}) error { - c := meta.(*packngo.Client) - createRequest := &packngo.ConnectCreateRequest{ - ProjectID: d.Get("project_id").(string), - ProviderID: d.Get("provider_id").(string), - Name: d.Get("name").(string), - Facility: d.Get("facility").(string), - ProviderPayload: d.Get("provider_payload").(string), - VLAN: d.Get("vxlan").(int), - PortSpeed: d.Get("port_speed").(int), - Description: d.Get("name").(string), - Tags: []string{d.Get("name").(string)}, - } - - pc, _, err := c.Connects.Create(createRequest) - if err != nil { - return friendlyError(err) - } - d.SetId(pc.ID) - _, err = waitForConnectStatus(d, "PROVISIONED", "PROVISIONING", meta) - if err != nil { - return friendlyError(err) - } - return resourcePacketConnectRead(d, meta) -} - -func resourcePacketConnectRead(d *schema.ResourceData, meta interface{}) error { - c := meta.(*packngo.Client) - pc, _, err := c.Connects.Get(d.Id(), d.Get("project_id").(string), nil) - if err != nil { - return friendlyError(err) - } - d.Set("name", pc.Name) - d.Set("provider_id", pc.ProviderID) - d.Set("provider_payload", pc.ProviderPayload) - d.Set("status", pc.Status) - d.Set("port_speed", pc.PortSpeed) - d.Set("vxlan", pc.VLAN) - - return nil -} - -func resourcePacketConnectDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - pc, _, err := client.Connects.Deprovision(d.Id(), d.Get("project_id").(string), false) - if err != nil { - return friendlyError(err) - } - _, err = waitForConnectStatus(d, "DEPROVISIONED", "DEPROVISIONING", meta) - if err != nil { - return friendlyError(err) - } - - _, err = client.Connects.Delete(d.Id(), pc.ProjectID) - if err != nil { - return friendlyError(err) - } - - return nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_device.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_device.go index 
8af251c73bd..e1c661f86fe 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_device.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_device.go @@ -1,6 +1,7 @@ package packet import ( + "encoding/json" "errors" "fmt" "path" @@ -11,17 +12,22 @@ import ( "time" "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/packethost/packngo" ) var matchIPXEScript = regexp.MustCompile(`(?i)^#![i]?pxe`) +var ipAddressTypes = []string{"public_ipv4", "private_ipv4", "public_ipv6"} func resourcePacketDevice() *schema.Resource { return &schema.Resource{ + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, Create: resourcePacketDeviceCreate, Read: resourcePacketDeviceRead, Update: resourcePacketDeviceUpdate, @@ -52,28 +58,50 @@ func resourcePacketDevice() *schema.Resource { ForceNew: true, }, + "deployed_facility": { + Type: schema.TypeString, + Computed: true, + }, + "facility": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateFacilityForDevice, - Deprecated: "Use the 'facilities' array instead.", - ConflictsWith: []string{"facilities"}, + Type: schema.TypeString, + Optional: true, + Removed: "Use the \"facilities\" array instead, i.e. 
change \n facility = \"ewr1\"\nto \n facilities = [\"ewr1\"]", + }, + "ip_address_types": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(ipAddressTypes, false), + }, + Removed: "Removed in favor of 'ip_address' attribute.", + }, + "facilities": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ForceNew: true, + MinItems: 1, DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // ignore set of empty facility "" => "xxx1" - if new == "" { + fsRaw := d.Get("facilities") + fs := convertStringArr(fsRaw.([]interface{})) + df := d.Get("deployed_facility").(string) + if contains(fs, df) { + return true + } + if contains(fs, "any") && (len(df) != 0) { return true } return false }, }, - - "facilities": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ConflictsWith: []string{"facility"}, + "ip_address": { + Type: schema.TypeList, + Optional: true, + Description: "Inbound rules for this security group", + Elem: ipAddressSchema(), + MinItems: 1, }, "plan": { @@ -118,7 +146,7 @@ func resourcePacketDevice() *schema.Resource { Computed: true, }, - "network_type": &schema.Schema{ + "network_type": { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{"layer3", "layer2-bonded", "layer2-individual", "hybrid"}, false), @@ -130,28 +158,28 @@ func resourcePacketDevice() *schema.Resource { }, }, - "ports": &schema.Schema{ + "ports": { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Computed: true, }, - "id": &schema.Schema{ + "id": { Type: schema.TypeString, Computed: true, }, - "type": &schema.Schema{ + "type": { Type: schema.TypeString, Computed: true, }, - "mac": &schema.Schema{ + "mac": { Type: schema.TypeString, Computed: true, }, - "bonded": &schema.Schema{ + "bonded": { Type: schema.TypeBool, Computed: true, }, @@ -205,17 +233,9 @@ func resourcePacketDevice() *schema.Resource { "user_data": { Type: schema.TypeString, Optional: true, - ForceNew: true, Sensitive: true, }, - "public_ipv4_subnet_size": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - }, - "ipxe_script_url": { Type: schema.TypeString, Optional: true, @@ -266,6 +286,18 @@ func resourcePacketDevice() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "wait_for_reservation_deprovision": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: false, + }, + "force_detach_volumes": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: false, + }, }, } } @@ -285,14 +317,21 @@ func resourcePacketDeviceCreate(d *schema.ResourceData, meta interface{}) error } } + var addressTypesSlice []packngo.IPAddressCreateRequest + _, ok = d.GetOk("ip_address") + if ok { + arr := d.Get("ip_address").([]interface{}) + addressTypesSlice = getNewIPAddressSlice(arr) + } + createRequest := &packngo.DeviceCreateRequest{ - Hostname: d.Get("hostname").(string), - Plan: d.Get("plan").(string), - Facility: facs, - OS: d.Get("operating_system").(string), - BillingCycle: d.Get("billing_cycle").(string), - ProjectID: d.Get("project_id").(string), - PublicIPv4SubnetSize: d.Get("public_ipv4_subnet_size").(int), + Hostname: d.Get("hostname").(string), + Plan: d.Get("plan").(string), + Facility: facs, + 
IPAddresses: addressTypesSlice, + OS: d.Get("operating_system").(string), + BillingCycle: d.Get("billing_cycle").(string), + ProjectID: d.Get("project_id").(string), } targetNetworkState, nTypeOk := d.GetOk("network_type") if attr, ok := d.GetOk("user_data"); ok { @@ -305,6 +344,11 @@ func resourcePacketDeviceCreate(d *schema.ResourceData, meta interface{}) error if attr, ok := d.GetOk("hardware_reservation_id"); ok { createRequest.HardwareReservationID = attr.(string) + } else { + wfrd := "wait_for_reservation_deprovision" + if d.Get(wfrd).(bool) { + return friendlyError(fmt.Errorf("You can't set %s when not using a hardware reservation", wfrd)) + } } if createRequest.OS == "custom_ipxe" { @@ -347,30 +391,47 @@ func resourcePacketDeviceCreate(d *schema.ResourceData, meta interface{}) error if err != nil { return errwrap.Wrapf("storage param contains invalid JSON: {{err}}", err) } - createRequest.Storage = s + var cpr packngo.CPR + err = json.Unmarshal([]byte(s), &cpr) + if err != nil { + return errwrap.Wrapf("Error parsing Storage string: {{err}}", err) + } + createRequest.Storage = &cpr } newDevice, _, err := client.Devices.Create(createRequest) if err != nil { - return friendlyError(err) + retErr := friendlyError(err) + if isNotFound(retErr) { + retErr = fmt.Errorf("%s, make sure project \"%s\" exists", retErr, createRequest.ProjectID) + } + return retErr } d.SetId(newDevice.ID) // Wait for the device so we can get the networking attributes that show up after a while. - _, err = waitForDeviceAttribute(d, "active", []string{"queued", "provisioning"}, "state", meta) + state, err := waitForDeviceAttribute(d, []string{"active", "failed"}, []string{"queued", "provisioning"}, "state", meta) if err != nil { - if isForbidden(err) { + d.SetId("") + fErr := friendlyError(err) + if isForbidden(fErr) { // If the device doesn't get to the active state, we can't recover it from here. 
- d.SetId("") return errors.New("provisioning time limit exceeded; the Packet team will investigate") } - return err + return fErr + } + if state != "active" { + d.SetId("") + return fmt.Errorf("Device in non-active state \"%s\"", state) } if nTypeOk { - _, err = waitForDeviceAttribute(d, "layer3", []string{"hybrid", "layer2-bonded", "layer2-individual"}, "network_type", meta) + _, err := waitForDeviceAttribute(d, []string{"layer3"}, []string{"hybrid", "layer2-bonded", "layer2-individual"}, "network_type", meta) + if err != nil { + return err + } tns := targetNetworkState.(string) if tns != "layer3" { @@ -384,45 +445,6 @@ func resourcePacketDeviceCreate(d *schema.ResourceData, meta interface{}) error return resourcePacketDeviceRead(d, meta) } -type NetworkInfo struct { - Networks []map[string]interface{} - IPv4SubnetSize int - Host string - PublicIPv4 string - PublicIPv6 string - PrivateIPv4 string -} - -func getNetworkInfo(ips []*packngo.IPAddressAssignment) NetworkInfo { - ni := NetworkInfo{Networks: make([]map[string]interface{}, 0, 1)} - for _, ip := range ips { - network := map[string]interface{}{ - "address": ip.Address, - "gateway": ip.Gateway, - "family": ip.AddressFamily, - "cidr": ip.CIDR, - "public": ip.Public, - } - ni.Networks = append(ni.Networks, network) - - // Initial device IPs are fixed and marked as "Management" - if ip.Management { - if ip.AddressFamily == 4 { - if ip.Public { - ni.Host = ip.Address - ni.IPv4SubnetSize = ip.CIDR - ni.PublicIPv4 = ip.Address - } else { - ni.PrivateIPv4 = ip.Address - } - } else { - ni.PublicIPv6 = ip.Address - } - } - } - return ni -} - func resourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*packngo.Client) @@ -441,7 +463,8 @@ func resourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error { d.Set("hostname", device.Hostname) d.Set("plan", device.Plan.Slug) - d.Set("facility", device.Facility.Code) + d.Set("deployed_facility", device.Facility.Code) + d.Set("facilities", []string{device.Facility.Code}) d.Set("operating_system", device.OS.Slug) d.Set("state", device.State) d.Set("billing_cycle", device.BillingCycle) @@ -452,16 +475,36 @@ func resourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error { d.Set("always_pxe", device.AlwaysPXE) d.Set("root_password", device.RootPassword) d.Set("project_id", device.Project.ID) - storageString, err := structure.FlattenJsonToString(device.Storage) - if err != nil { - return fmt.Errorf("[ERR] Error getting storage JSON string for device (%s): %s", d.Id(), err) + if device.Storage != nil { + rawStorageBytes, err := json.Marshal(device.Storage) + if err != nil { + return fmt.Errorf("[ERR] Error getting storage JSON string for device (%s): %s", d.Id(), err) + } + + storageString, err := structure.NormalizeJsonString(string(rawStorageBytes)) + if err != nil { + return fmt.Errorf("[ERR] Errori normalizing storage JSON string for device (%s): %s", d.Id(), err) + } + d.Set("storage", storageString) } - d.Set("storage", storageString) if len(device.HardwareReservation.Href) > 0 { d.Set("hardware_reservation_id", path.Base(device.HardwareReservation.Href)) } - d.Set("network_type", device.NetworkType) + networkType, err := device.GetNetworkType() + if err != nil { + return err + } + d.Set("network_type", networkType) + + wfrd := "wait_for_reservation_deprovision" + if _, ok := d.GetOk(wfrd); !ok { + d.Set(wfrd, nil) + } + fdv := "force_detach_volumes" + if _, ok := d.GetOk(fdv); !ok { + d.Set(fdv, nil) + } d.Set("tags", device.Tags) 
keyIDs := []string{} @@ -480,7 +523,6 @@ func resourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error { }) d.Set("network", networkInfo.Networks) - d.Set("public_ipv4_subnet_size", networkInfo.IPv4SubnetSize) d.Set("access_public_ipv4", networkInfo.PublicIPv4) d.Set("access_private_ipv4", networkInfo.PrivateIPv4) d.Set("access_public_ipv6", networkInfo.PublicIPv6) @@ -498,33 +540,6 @@ func resourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error { return nil } -func getNetworkRank(family int, public bool) int { - switch { - case family == 4 && public: - return 0 - case family == 6: - return 1 - case family == 4 && public: - return 2 - } - return 3 -} - -func getPorts(ps []packngo.Port) []map[string]interface{} { - ret := make([]map[string]interface{}, 0, 1) - for _, p := range ps { - port := map[string]interface{}{ - "name": p.Name, - "id": p.ID, - "type": p.Type, - "mac": p.Data.MAC, - "bonded": p.Data.Bonded, - } - ret = append(ret, port) - } - return ret -} - func resourcePacketDeviceUpdate(d *schema.ResourceData, meta interface{}) error { client := meta.(*packngo.Client) @@ -545,6 +560,10 @@ func resourcePacketDeviceUpdate(d *schema.ResourceData, meta interface{}) error dDesc := d.Get("description").(string) ur.Description = &dDesc } + if d.HasChange("user_data") { + dUserData := d.Get("user_data").(string) + ur.UserData = &dUserData + } if d.HasChange("hostname") { dHostname := d.Get("hostname").(string) ur.Hostname = &dHostname @@ -593,60 +612,25 @@ func resourcePacketDeviceUpdate(d *schema.ResourceData, meta interface{}) error func resourcePacketDeviceDelete(d *schema.ResourceData, meta interface{}) error { client := meta.(*packngo.Client) - if _, err := client.Devices.Delete(d.Id()); err != nil { - return friendlyError(err) + fdvIf, fdvOk := d.GetOk("force_detach_volumes") + fdv := false + if fdvOk && fdvIf.(bool) { + fdv = true } - return nil -} - -func waitForDeviceAttribute(d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) { - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: []string{target}, - Refresh: newDeviceStateRefreshFunc(d, attribute, meta), - Timeout: 60 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, + if _, err := client.Devices.Delete(d.Id(), fdv); err != nil { + return friendlyError(err) } - return stateConf.WaitForState() -} - -func newDeviceStateRefreshFunc(d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc { - client := meta.(*packngo.Client) - return func() (interface{}, string, error) { - if err := resourcePacketDeviceRead(d, meta); err != nil { - return nil, "", err - } - - if attr, ok := d.GetOk(attribute); ok { - device, _, err := client.Devices.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project"}}) + resId, resIdOk := d.GetOk("hardware_reservation_id") + if resIdOk { + wfrd, wfrdOK := d.GetOk("wait_for_reservation_deprovision") + if wfrdOK && wfrd.(bool) { + err := waitUntilReservationProvisionable(resId.(string), meta) if err != nil { - return nil, "", friendlyError(err) + return err } - return &device, attr.(string), nil } - - return nil, "", nil - } -} - -// powerOnAndWait Powers on the device and waits for it to be active. 
-func powerOnAndWait(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - _, err := client.Devices.PowerOn(d.Id()) - if err != nil { - return friendlyError(err) - } - - _, err = waitForDeviceAttribute(d, "active", []string{"off"}, "state", client) - return err -} - -func validateFacilityForDevice(v interface{}, k string) (ws []string, errors []error) { - if v.(string) == "any" { - errors = append(errors, fmt.Errorf(`Cannot use facility: "any"`)) } - return + return nil } diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ip_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ip_attachment.go index 710f0db2198..250f9f65348 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ip_attachment.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ip_attachment.go @@ -5,7 +5,7 @@ import ( "log" "path" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/packethost/packngo" ) diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_organization.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_organization.go index ac821e38eaa..a595b2747fd 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_organization.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_organization.go @@ -1,7 +1,7 @@ package packet import ( - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/packethost/packngo" ) @@ -11,6 +11,9 @@ func resourcePacketOrganization() *schema.Resource { Read: resourcePacketOrganizationRead, Update: resourcePacketOrganizationUpdate, Delete: resourcePacketOrganizationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_port_vlan_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_port_vlan_attachment.go index f1fa216cd70..06c36dd5706 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_port_vlan_attachment.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_port_vlan_attachment.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/packethost/packngo" ) @@ -13,6 +13,7 @@ func resourcePacketPortVlanAttachment() *schema.Resource { Create: resourcePacketPortVlanAttachmentCreate, Read: resourcePacketPortVlanAttachmentRead, Delete: resourcePacketPortVlanAttachmentDelete, + Update: resourcePacketPortVlanAttachmentUpdate, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, @@ -47,6 +48,11 @@ func resourcePacketPortVlanAttachment() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "native": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, }, } } @@ -57,13 +63,16 @@ func resourcePacketPortVlanAttachmentCreate(d *schema.ResourceData, meta interfa pName := d.Get("port_name").(string) vlanVNID := 
d.Get("vlan_vnid").(int) - dev, _, err := client.Devices.Get(deviceID, &packngo.GetOptions{Includes: []string{"virtual_networks,project"}}) + dev, _, err := client.Devices.Get(deviceID, &packngo.GetOptions{ + Includes: []string{"virtual_networks,project,native_virtual_network"}, + }) if err != nil { return err } portFound := false vlanFound := false + vlanID := "" var port packngo.Port for _, p := range dev.NetworkPorts { if p.Name == pName { @@ -72,6 +81,7 @@ func resourcePacketPortVlanAttachmentCreate(d *schema.ResourceData, meta interfa for _, n := range p.AttachedVirtualNetworks { if vlanVNID == n.VXLAN { vlanFound = true + vlanID = n.ID break } } @@ -81,34 +91,50 @@ func resourcePacketPortVlanAttachmentCreate(d *schema.ResourceData, meta interfa if !portFound { return fmt.Errorf("Device %s doesn't have port %s", deviceID, pName) } + + par := &packngo.PortAssignRequest{PortID: port.ID} if vlanFound { log.Printf("Port %s already has VLAN %d assigned", pName, vlanVNID) - return nil - } + par.VirtualNetworkID = vlanID + } else { + facility := dev.Facility.Code + vlans, _, err := client.ProjectVirtualNetworks.List(dev.Project.ID, nil) + if err != nil { + return err + } + for _, n := range vlans.VirtualNetworks { + if (n.VXLAN == vlanVNID) && (n.FacilityCode == facility) { + vlanID = n.ID + } + } + if len(vlanID) == 0 { + return fmt.Errorf("VLAN with VNID %d doesn't exist in facilty %s", vlanVNID, facility) + } - vlanID := "" - facility := dev.Facility.Code - vlans, _, err := client.ProjectVirtualNetworks.List(dev.Project.ID, nil) - if err != nil { - return err - } - for _, n := range vlans.VirtualNetworks { - if (n.VXLAN == vlanVNID) && (n.FacilityCode == facility) { - vlanID = n.ID + par.VirtualNetworkID = vlanID + + // Packet doesn't allow multiple VLANs to be assigned + // to the same port at the same time + lockId := "vlan-attachment-" + port.ID + packetMutexKV.Lock(lockId) + defer packetMutexKV.Unlock(lockId) + + _, _, err = client.DevicePorts.Assign(par) + if err != nil { + return err } } - if len(vlanID) == 0 { - return fmt.Errorf("VLAN with VNID %d doesn't exist in facilty %s", vlanVNID, facility) - } - par := &packngo.PortAssignRequest{PortID: port.ID, VirtualNetworkID: vlanID} + d.SetId(port.ID + ":" + vlanID) - _, _, err = client.DevicePorts.Assign(par) - if err != nil { - return err + native := d.Get("native").(bool) + if native { + _, _, err = client.DevicePorts.AssignNative(par) + if err != nil { + return err + } } - d.SetId(port.ID + ":" + vlanID) return resourcePacketPortVlanAttachmentRead(d, meta) } @@ -118,7 +144,7 @@ func resourcePacketPortVlanAttachmentRead(d *schema.ResourceData, meta interface pName := d.Get("port_name").(string) vlanVNID := d.Get("vlan_vnid").(int) - dev, _, err := client.Devices.Get(deviceID, &packngo.GetOptions{Includes: []string{"virtual_networks,project"}}) + dev, _, err := client.Devices.Get(deviceID, &packngo.GetOptions{Includes: []string{"virtual_networks,project,native_virtual_network"}}) if err != nil { return err } @@ -126,6 +152,7 @@ func resourcePacketPortVlanAttachmentRead(d *schema.ResourceData, meta interface vlanFound := false portID := "" vlanID := "" + vlanNative := false for _, p := range dev.NetworkPorts { if p.Name == pName { portFound = true @@ -134,35 +161,77 @@ func resourcePacketPortVlanAttachmentRead(d *schema.ResourceData, meta interface if vlanVNID == n.VXLAN { vlanFound = true vlanID = n.ID + if p.NativeVirtualNetwork != nil { + vlanNative = vlanID == p.NativeVirtualNetwork.ID + } break } } break } } - d.Set("port_id", 
portID) - d.Set("vlan_id", vlanID) if !portFound { return fmt.Errorf("Device %s doesn't have port %s", deviceID, pName) } if !vlanFound { - d.SetId(portID) + d.SetId("") } + d.Set("port_id", portID) + d.Set("vlan_id", vlanID) + d.Set("native", vlanNative) return nil } +func resourcePacketPortVlanAttachmentUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + if d.HasChange("native") { + native := d.Get("native").(bool) + portID := d.Get("port_id").(string) + if native { + vlanID := d.Get("vlan_id").(string) + par := &packngo.PortAssignRequest{PortID: portID, VirtualNetworkID: vlanID} + _, _, err := client.DevicePorts.AssignNative(par) + if err != nil { + return err + } + } else { + _, _, err := client.DevicePorts.UnassignNative(portID) + if err != nil { + return err + } + } + } + return resourcePacketPortVlanAttachmentRead(d, meta) +} + func resourcePacketPortVlanAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) pID := d.Get("port_id").(string) vlanID := d.Get("vlan_id").(string) + native := d.Get("native").(bool) + if native { + _, _, err := client.DevicePorts.UnassignNative(pID) + if err != nil { + return err + } + } par := &packngo.PortAssignRequest{PortID: pID, VirtualNetworkID: vlanID} - client := meta.(*packngo.Client) + lockId := "vlan-detachment-" + pID + packetMutexKV.Lock(lockId) + defer packetMutexKV.Unlock(lockId) portPtr, _, err := client.DevicePorts.Unassign(par) if err != nil { return err } forceBond := d.Get("force_bond").(bool) if forceBond && (len(portPtr.AttachedVirtualNetworks) == 0) { - _, _, err = client.DevicePorts.Bond(&packngo.BondRequest{PortID: pID, BulkEnable: false}) + deviceID := d.Get("device_id").(string) + portName := d.Get("port_name").(string) + port, err := client.DevicePorts.GetPortByName(deviceID, portName) + if err != nil { + return friendlyError(err) + } + _, _, err = client.DevicePorts.Bond(port, false) if err != nil { return friendlyError(err) } diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project.go index 83563888ce5..5b7a8e9d009 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project.go @@ -6,8 +6,8 @@ import ( "regexp" "strings" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/packethost/packngo" ) @@ -64,30 +64,30 @@ func resourcePacketProject() *schema.Resource { }, ValidateFunc: validation.StringMatch(uuidRE, "must be a valid UUID"), }, - "bgp_config": &schema.Schema{ + "bgp_config": { Type: schema.TypeList, MaxItems: 1, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "deployment_type": &schema.Schema{ + "deployment_type": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"local", "global"}, false), }, - "asn": &schema.Schema{ + "asn": { Type: schema.TypeInt, Required: true, }, - "md5": &schema.Schema{ + "md5": { Type: schema.TypeString, Optional: true, }, - "status": &schema.Schema{ + "status": { Type: schema.TypeString, Computed: true, }, - "max_prefix": &schema.Schema{ + "max_prefix": { 
Type: schema.TypeInt, Computed: true, }, diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project_ssh_key.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project_ssh_key.go index 224ec28bc32..e23bb6cd69c 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project_ssh_key.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_project_ssh_key.go @@ -1,8 +1,7 @@ package packet import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/packethost/packngo" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourcePacketProjectSSHKey() *schema.Resource { @@ -14,41 +13,12 @@ func resourcePacketProjectSSHKey() *schema.Resource { } return &schema.Resource{ Create: resourcePacketSSHKeyCreate, - Read: resourcePacketProjectSSHKeyRead, + Read: resourcePacketSSHKeyRead, Update: resourcePacketSSHKeyUpdate, Delete: resourcePacketSSHKeyDelete, - + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: pkeySchema, } } - -func resourcePacketProjectSSHKeyRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - projectID := d.Get("project_id").(string) - projectKeys, _, err := client.SSHKeys.ProjectList(projectID) - if err != nil { - err = friendlyError(err) - if isNotFound(err) { - d.SetId("") - return nil - } - - return err - } - - keyFound := false - for _, k := range projectKeys { - if k.ID == d.Id() { - keyFound = true - d.Set("name", k.Label) - d.Set("public_key", k.Key) - d.Set("fingerprint", k.FingerPrint) - d.Set("created", k.Created) - d.Set("updated", k.Updated) - } - } - if !keyFound { - d.SetId("") - } - return nil -} diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_reserved_ip_block.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_reserved_ip_block.go index ae81abefff6..87e6e58b0df 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_reserved_ip_block.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_reserved_ip_block.go @@ -4,8 +4,8 @@ import ( "fmt" "path" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/packethost/packngo" ) @@ -75,6 +75,11 @@ func resourcePacketReservedIPBlock() *schema.Resource { Optional: true, ForceNew: true, } + reservedBlockSchema["description"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + } reservedBlockSchema["quantity"] = &schema.Schema{ Type: schema.TypeInt, Required: true, @@ -122,6 +127,10 @@ func resourcePacketReservedIPBlockCreate(d *schema.ResourceData, meta interface{ if typ == "public_ipv4" { req.Facility = &fs } + desc, ok := d.GetOk("description") + if ok { + req.Description = desc.(string) + } projectID := d.Get("project_id").(string) @@ -210,6 +219,9 @@ func resourcePacketReservedIPBlockRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error reading IP address block with ID %s: %s", id, err) } err = loadBlock(d, reservedBlock) + if (reservedBlock.Description != nil) && (*(reservedBlock.Description) != "") { + d.Set("description", 
*(reservedBlock.Description)) + } d.Set("global", getGlobalBool(reservedBlock)) if err != nil { return err diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_spot_market_request.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_spot_market_request.go index 50ea2aa1768..fe6abd6eda6 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_spot_market_request.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_spot_market_request.go @@ -1,10 +1,11 @@ package packet import ( + "fmt" "time" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/packethost/packngo" ) @@ -108,7 +109,10 @@ func resourcePacketSpotMarketRequest() *schema.Resource { ForceNew: true, }, }, - Timeouts: resourceDefaultTimeouts, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, } } @@ -270,7 +274,7 @@ func resourcePacketSpotMarketRequestDelete(d *schema.ResourceData, meta interfac } for _, d := range smr.Devices { - _, err := client.Devices.Delete(d.ID) + _, err := client.Devices.Delete(d.ID, true) if err != nil { return err } @@ -289,14 +293,17 @@ func resourceStateRefreshFunc(d *schema.ResourceData, meta interface{}) resource smr, _, err := client.SpotMarketRequests.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project", "devices", "facilities"}}) if err != nil { - return nil, "", err + return nil, "", fmt.Errorf("Failed to fetch Spot market request with following error: %s", err.Error()) } var finished bool for _, d := range smr.Devices { - dev, _, _ := client.Devices.Get(d.ID, nil) + dev, _, err := client.Devices.Get(d.ID, nil) + if err != nil { + return nil, "", fmt.Errorf("Failed to fetch Device with following error: %s", err.Error()) + } if dev.State != "active" { break } else { diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ssh_key.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ssh_key.go index ff9de66672a..6ef2cdc059c 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ssh_key.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_ssh_key.go @@ -1,7 +1,9 @@ package packet import ( - "github.com/hashicorp/terraform/helper/schema" + "path" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/packethost/packngo" ) @@ -31,6 +33,10 @@ func packetSSHKeyCommonFields() map[string]*schema.Schema { Type: schema.TypeString, Computed: true, }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, } } @@ -41,6 +47,9 @@ func resourcePacketSSHKey() *schema.Resource { Read: resourcePacketSSHKeyRead, Update: resourcePacketSSHKeyUpdate, Delete: resourcePacketSSHKeyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: packetSSHKeyCommonFields(), } @@ -65,9 +74,6 @@ func resourcePacketSSHKeyCreate(d *schema.ResourceData, meta interface{}) error } d.SetId(key.ID) - if isProjectKey { - return resourcePacketProjectSSHKeyRead(d, meta) - } return resourcePacketSSHKeyRead(d, meta) } @@ -89,13 +95,20 @@ func 
resourcePacketSSHKeyRead(d *schema.ResourceData, meta interface{}) error { return err } + ownerID := path.Base(key.Owner.Href) + d.Set("id", key.ID) d.Set("name", key.Label) d.Set("public_key", key.Key) d.Set("fingerprint", key.FingerPrint) + d.Set("owner_id", ownerID) d.Set("created", key.Created) d.Set("updated", key.Updated) + if key.Owner.Href[:10] == "/projects/" { + d.Set("project_id", ownerID) + } + return nil } diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_vlan.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_vlan.go index 0b3fc1df75d..d81733f4b9f 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_vlan.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_vlan.go @@ -1,7 +1,7 @@ package packet import ( - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/packethost/packngo" ) @@ -70,6 +70,7 @@ func resourcePacketVlanRead(d *schema.ResourceData, meta interface{}) error { d.Set("description", vlan.Description) d.Set("project_id", vlan.Project.ID) d.Set("vxlan", vlan.VXLAN) + d.Set("facility", vlan.FacilityCode) return nil } diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume.go index 03464940a30..8f99eef84c0 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume.go @@ -1,12 +1,11 @@ package packet import ( - "errors" "fmt" "time" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/packethost/packngo" ) @@ -153,50 +152,33 @@ func resourcePacketVolumeCreate(d *schema.ResourceData, meta interface{}) error d.SetId(newVolume.ID) - _, err = waitForVolumeAttribute(d, "active", []string{"queued", "provisioning"}, "state", meta) + err = waitForVolumeState(newVolume.ID, "active", []string{"queued", "provisioning"}, meta) if err != nil { - if isForbidden(err) { - // If the volume doesn't get to the active state, we can't recover it from here. 
- d.SetId("") - - return errors.New("provisioning time limit exceeded; the Packet team will investigate") - } + d.SetId("") return err } return resourcePacketVolumeRead(d, meta) } -func waitForVolumeAttribute(d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) { +func waitForVolumeState(volumeID string, target string, pending []string, meta interface{}) error { stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: []string{target}, - Refresh: newVolumeStateRefreshFunc(d, attribute, meta), + Pending: pending, + Target: []string{target}, + Refresh: func() (interface{}, string, error) { + client := meta.(*packngo.Client) + v, _, err := client.Volumes.Get(volumeID, &packngo.GetOptions{Includes: []string{"project", "snapshot_policies", "facility"}}) + if err == nil { + return 42, v.State, nil + } + return 42, "error", err + }, Timeout: 60 * time.Minute, Delay: 10 * time.Second, MinTimeout: 3 * time.Second, } - return stateConf.WaitForState() -} - -func newVolumeStateRefreshFunc(d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc { - client := meta.(*packngo.Client) - - return func() (interface{}, string, error) { - if err := resourcePacketVolumeRead(d, meta); err != nil { - return nil, "", err - } - - if attr, ok := d.GetOk(attribute); ok { - volume, _, err := client.Volumes.Get(d.Id(), &packngo.GetOptions{Includes: []string{"project", "snapshot_policies", "facility"}}) - if err != nil { - return nil, "", friendlyError(err) - } - return &volume, attr.(string), nil - } - - return nil, "", nil - } + _, err := stateConf.WaitForState() + return err } func resourcePacketVolumeRead(d *schema.ResourceData, meta interface{}) error { diff --git a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume_attachment.go index 1316f793c09..fa819a7d4d3 100644 --- a/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume_attachment.go +++ b/vendor/github.com/terraform-providers/terraform-provider-packet/packet/resource_packet_volume_attachment.go @@ -4,7 +4,7 @@ import ( "log" "path/filepath" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/packethost/packngo" ) diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go index 7c498e90d93..18c8bc0567c 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go @@ -89,6 +89,7 @@ func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) { return nil, errors.New("chacha20: wrong nonce size") } + key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint c.key = [8]uint32{ binary.LittleEndian.Uint32(key[0:4]), binary.LittleEndian.Uint32(key[4:8]), @@ -260,7 +261,9 @@ func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) { s.precompDone = true } - for i := 0; i < len(src); i += blockSize { + // A condition of len(src) > 0 would be sufficient, but this also + // acts as a bounds check elimination hint. + for len(src) >= 64 && len(dst) >= 64 { // The remainder of the first column round. 
fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter) @@ -285,49 +288,31 @@ func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) { x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) } - // Finally, add back the initial state to generate the key stream. - x0 += c0 - x1 += c1 - x2 += c2 - x3 += c3 - x4 += c4 - x5 += c5 - x6 += c6 - x7 += c7 - x8 += c8 - x9 += c9 - x10 += c10 - x11 += c11 - x12 += s.counter - x13 += c13 - x14 += c14 - x15 += c15 + // Add back the initial state to generate the key stream, then + // XOR the key stream with the source and write out the result. + addXor(dst[0:4], src[0:4], x0, c0) + addXor(dst[4:8], src[4:8], x1, c1) + addXor(dst[8:12], src[8:12], x2, c2) + addXor(dst[12:16], src[12:16], x3, c3) + addXor(dst[16:20], src[16:20], x4, c4) + addXor(dst[20:24], src[20:24], x5, c5) + addXor(dst[24:28], src[24:28], x6, c6) + addXor(dst[28:32], src[28:32], x7, c7) + addXor(dst[32:36], src[32:36], x8, c8) + addXor(dst[36:40], src[36:40], x9, c9) + addXor(dst[40:44], src[40:44], x10, c10) + addXor(dst[44:48], src[44:48], x11, c11) + addXor(dst[48:52], src[48:52], x12, s.counter) + addXor(dst[52:56], src[52:56], x13, c13) + addXor(dst[56:60], src[56:60], x14, c14) + addXor(dst[60:64], src[60:64], x15, c15) s.counter += 1 if s.counter == 0 { panic("chacha20: internal error: counter overflow") } - in, out := src[i:], dst[i:] - in, out = in[:blockSize], out[:blockSize] // bounds check elimination hint - - // XOR the key stream with the source and write out the result. - xor(out[0:], in[0:], x0) - xor(out[4:], in[4:], x1) - xor(out[8:], in[8:], x2) - xor(out[12:], in[12:], x3) - xor(out[16:], in[16:], x4) - xor(out[20:], in[20:], x5) - xor(out[24:], in[24:], x6) - xor(out[28:], in[28:], x7) - xor(out[32:], in[32:], x8) - xor(out[36:], in[36:], x9) - xor(out[40:], in[40:], x10) - xor(out[44:], in[44:], x11) - xor(out[48:], in[48:], x12) - xor(out[52:], in[52:], x13) - xor(out[56:], in[56:], x14) - xor(out[60:], in[60:], x15) + src, dst = src[blockSize:], dst[blockSize:] } } diff --git a/vendor/golang.org/x/crypto/chacha20/xor.go b/vendor/golang.org/x/crypto/chacha20/xor.go index 0110c9865af..c2d04851e0d 100644 --- a/vendor/golang.org/x/crypto/chacha20/xor.go +++ b/vendor/golang.org/x/crypto/chacha20/xor.go @@ -13,10 +13,10 @@ const unaligned = runtime.GOARCH == "386" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x" -// xor reads a little endian uint32 from src, XORs it with u and +// addXor reads a little endian uint32 from src, XORs it with (a + b) and // places the result in little endian byte order in dst. -func xor(dst, src []byte, u uint32) { - _, _ = src[3], dst[3] // eliminate bounds checks +func addXor(dst, src []byte, a, b uint32) { + _, _ = src[3], dst[3] // bounds check elimination hint if unaligned { // The compiler should optimize this code into // 32-bit unaligned little endian loads and stores. 
@@ -27,15 +27,16 @@ func xor(dst, src []byte, u uint32) { v |= uint32(src[1]) << 8 v |= uint32(src[2]) << 16 v |= uint32(src[3]) << 24 - v ^= u + v ^= a + b dst[0] = byte(v) dst[1] = byte(v >> 8) dst[2] = byte(v >> 16) dst[3] = byte(v >> 24) } else { - dst[0] = src[0] ^ byte(u) - dst[1] = src[1] ^ byte(u>>8) - dst[2] = src[2] ^ byte(u>>16) - dst[3] = src[3] ^ byte(u>>24) + a += b + dst[0] = src[0] ^ byte(a) + dst[1] = src[1] ^ byte(a>>8) + dst[2] = src[2] ^ byte(a>>16) + dst[3] = src[3] ^ byte(a>>24) } } diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go index 51f740500e6..b909471cc06 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/client.go +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -102,8 +102,9 @@ type ConstraintExtension struct { // AddedKey describes an SSH key to be added to an Agent. type AddedKey struct { - // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey or - // *ecdsa.PrivateKey, which will be inserted into the agent. + // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey, + // ed25519.PrivateKey or *ecdsa.PrivateKey, which will be inserted into the + // agent. PrivateKey interface{} // Certificate, if not nil, is communicated to the agent and will be // stored with the key. @@ -566,6 +567,17 @@ func (c *client) insertKey(s interface{}, comment string, constraints []byte) er Comments: comment, Constraints: constraints, }) + case ed25519.PrivateKey: + req = ssh.Marshal(ed25519KeyMsg{ + Type: ssh.KeyAlgoED25519, + Pub: []byte(k)[32:], + Priv: []byte(k), + Comments: comment, + Constraints: constraints, + }) + // This function originally supported only *ed25519.PrivateKey, however the + // general idiom is to pass ed25519.PrivateKey by value, not by pointer. + // We still support the pointer variant for backwards compatibility. case *ed25519.PrivateKey: req = ssh.Marshal(ed25519KeyMsg{ Type: ssh.KeyAlgoED25519, @@ -683,6 +695,18 @@ func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string Comments: comment, Constraints: constraints, }) + case ed25519.PrivateKey: + req = ssh.Marshal(ed25519CertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + Pub: []byte(k)[32:], + Priv: []byte(k), + Comments: comment, + Constraints: constraints, + }) + // This function originally supported only *ed25519.PrivateKey, however the + // general idiom is to pass ed25519.PrivateKey by value, not by pointer. + // We still support the pointer variant for backwards compatibility. case *ed25519.PrivateKey: req = ssh.Marshal(ed25519CertMsg{ Type: cert.Type(), diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go index b0204ee59f2..8bd6b3daff5 100644 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -119,7 +119,7 @@ var cipherModes = map[string]*cipherMode{ chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, // CBC mode is insecure and so is not included in the default config. - // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely + // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely // needed, it's possible to specify a custom Config to enable it. // You should expect that an active attacker can recover plaintext if // you do. 
diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go index 6c3c648fc95..7eedb209fa7 100644 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -572,7 +572,7 @@ func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, e return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil } -func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { +func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { // Send GexRequest kexDHGexRequest := kexDHGexRequestMsg{ MinBits: dhGroupExchangeMinimumBits, @@ -677,7 +677,7 @@ func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshak // Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. // // This is a minimal implementation to satisfy the automated tests. -func (gex *dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { // Receive GexRequest packet, err := c.readPacket() if err != nil { diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index 06f537c135a..31f26349a05 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -1246,15 +1246,23 @@ func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc { } key, iv := k[:32], k[32:] - if cipherName != "aes256-ctr" { - return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q", cipherName, "aes256-ctr") - } c, err := aes.NewCipher(key) if err != nil { return nil, err } - ctr := cipher.NewCTR(c, iv) - ctr.XORKeyStream(privKeyBlock, privKeyBlock) + switch cipherName { + case "aes256-ctr": + ctr := cipher.NewCTR(c, iv) + ctr.XORKeyStream(privKeyBlock, privKeyBlock) + case "aes256-cbc": + if len(privKeyBlock)%c.BlockSize() != 0 { + return nil, fmt.Errorf("ssh: invalid encrypted private key length, not a multiple of the block size") + } + cbc := cipher.NewCBCDecrypter(c, iv) + cbc.CryptBlocks(privKeyBlock, privKeyBlock) + default: + return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q or %q", cipherName, "aes256-ctr", "aes256-cbc") + } return privKeyBlock, nil } diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go index d1b4fca3a94..2ffb97bfb8a 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -113,6 +113,7 @@ func NewTerminal(c io.ReadWriter, prompt string) *Terminal { } const ( + keyCtrlC = 3 keyCtrlD = 4 keyCtrlU = 21 keyEnter = '\r' @@ -151,8 +152,12 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { switch b[0] { case 1: // ^A return keyHome, b[1:] + case 2: // ^B + return keyLeft, b[1:] case 5: // ^E return keyEnd, b[1:] + case 6: // ^F + return keyRight, b[1:] case 8: // ^H return keyBackspace, b[1:] case 11: // ^K @@ -738,6 +743,9 @@ func (t *Terminal) readLine() (line string, err error) { return "", io.EOF } } + if key == keyCtrlC { + return "", io.EOF + } if key == keyPasteStart { t.pasteActive = true if len(t.line) == 0 { diff --git a/vendor/modules.txt b/vendor/modules.txt index 7bc4bdea8e7..6057ace67a6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -699,7 +699,7 @@ 
github.com/hashicorp/go-multierror ## explicit github.com/hashicorp/go-plugin github.com/hashicorp/go-plugin/internal/plugin -# github.com/hashicorp/go-retryablehttp v0.6.4 +# github.com/hashicorp/go-retryablehttp v0.6.6 github.com/hashicorp/go-retryablehttp # github.com/hashicorp/go-safetemp v1.0.0 github.com/hashicorp/go-safetemp @@ -779,15 +779,12 @@ github.com/hashicorp/terraform/configs/configupgrade github.com/hashicorp/terraform/configs/hcl2shim github.com/hashicorp/terraform/dag github.com/hashicorp/terraform/experiments -github.com/hashicorp/terraform/flatmap -github.com/hashicorp/terraform/helper/config github.com/hashicorp/terraform/helper/didyoumean github.com/hashicorp/terraform/helper/experiment github.com/hashicorp/terraform/helper/hashcode github.com/hashicorp/terraform/helper/hilmapstructure github.com/hashicorp/terraform/helper/logging github.com/hashicorp/terraform/helper/plugin -github.com/hashicorp/terraform/helper/resource github.com/hashicorp/terraform/helper/schema github.com/hashicorp/terraform/helper/slowmessage github.com/hashicorp/terraform/helper/structure @@ -1069,7 +1066,7 @@ github.com/ovirt/terraform-provider-ovirt/ovirt # github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db ## explicit github.com/packer-community/winrmcp/winrmcp -# github.com/packethost/packngo v0.2.0 +# github.com/packethost/packngo v0.2.1-0.20200424110205-36917dbc292f ## explicit github.com/packethost/packngo # github.com/pborman/uuid v1.2.0 @@ -1434,7 +1431,7 @@ github.com/terraform-providers/terraform-provider-local/local # github.com/terraform-providers/terraform-provider-openstack v1.28.0 ## explicit github.com/terraform-providers/terraform-provider-openstack/openstack -# github.com/terraform-providers/terraform-provider-packet v1.7.2 +# github.com/terraform-providers/terraform-provider-packet v1.7.3-0.20200512085448-9717adf77547 ## explicit github.com/terraform-providers/terraform-provider-packet/packet # github.com/terraform-providers/terraform-provider-random v1.3.2-0.20190925210718-83518d96ae4f @@ -1563,7 +1560,7 @@ go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate # go4.org v0.0.0-20191010144846-132d2879e1e9 go4.org/errorutil -# golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 +# golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a ## explicit golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish From a909808f8211b0bb781f7e2c31c754b637d7b604 Mon Sep 17 00:00:00 2001 From: Marques Johansson Date: Fri, 21 Aug 2020 13:24:11 -0400 Subject: [PATCH 07/11] fixup! 
fill in packet Platform and Metadata types --- .gitignore | 1 + data/data/bootstrap/packet/OWNERS | 7 + data/data/packet/bootstrap/main.tf | 155 +++++++ data/data/packet/bootstrap/output.tf | 3 + data/data/packet/bootstrap/variables.tf | 17 + data/data/packet/dns/dns.tf | 57 +++ data/data/packet/dns/variables.tf | 7 + data/data/packet/main.tf | 20 +- data/data/packet/master/main.tf | 12 + data/data/packet/master/outputs.tf | 3 + data/data/packet/master/variables.tf | 14 + data/data/packet/variables-packet.tf | 10 +- docs/user/customization.md | 2 + docs/user/packet/customization.md | 5 + go.mod | 1 + go.sum | 277 +++++++++++++ pkg/asset/cluster/tfvars.go | 28 +- pkg/asset/rhcos/image.go | 3 + pkg/tfvars/packet/packet.go | 37 +- .../cluster-api-provider-packet/LICENSE | 14 + .../pkg/apis/packetprovider/v1alpha1/doc.go | 23 ++ .../packetclusterproviderspec_types.go | 49 +++ .../packetclusterproviderstatus_types.go | 62 +++ .../packetmachineproviderspec_types.go | 80 ++++ .../packetmachineproviderstatus_types.go | 62 +++ .../apis/packetprovider/v1alpha1/register.go | 46 +++ .../pkg/apis/packetprovider/v1alpha1/types.go | 18 + .../v1alpha1/zz_generated.deepcopy.go | 378 ++++++++++++++++++ vendor/modules.txt | 4 + .../pkg/runtime/scheme/scheme.go | 29 ++ 30 files changed, 1399 insertions(+), 25 deletions(-) create mode 100644 data/data/bootstrap/packet/OWNERS create mode 100644 docs/user/packet/customization.md create mode 100644 vendor/github.com/packethost/cluster-api-provider-packet/LICENSE create mode 100644 vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/doc.go create mode 100644 vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetclusterproviderspec_types.go create mode 100644 vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetclusterproviderstatus_types.go create mode 100644 vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetmachineproviderspec_types.go create mode 100644 vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetmachineproviderstatus_types.go create mode 100644 vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/register.go create mode 100644 vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/types.go create mode 100644 vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/runtime/scheme/scheme.go diff --git a/.gitignore b/.gitignore index 7237464fdf4..7ee8f093d99 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ /bin/ +.openshift_install_state.json .openshift_install.log .vscode diff --git a/data/data/bootstrap/packet/OWNERS b/data/data/bootstrap/packet/OWNERS new file mode 100644 index 00000000000..8877fbf8d6d --- /dev/null +++ b/data/data/bootstrap/packet/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. 
+ +approvers: + - packet-approvers +reviewers: + - packet-reviewers diff --git a/data/data/packet/bootstrap/main.tf b/data/data/packet/bootstrap/main.tf index e69de29bb2d..46be3c5f28b 100644 --- a/data/data/packet/bootstrap/main.tf +++ b/data/data/packet/bootstrap/main.tf @@ -0,0 +1,155 @@ + +locals { + arch = "x86_64" + coreos_baseurl = "http://54.172.173.155/pub/openshift-v4/dependencies/rhcos" + coreos_url = "${local.coreos_baseurl}/${var.ocp_version}/${var.ocp_version}.${var.ocp_version_zstream}" + coreos_filenm = "rhcos-${var.ocp_version}.${var.ocp_version_zstream}-${local.arch}" + coreos_img = "${local.coreos_filenm}-metal.${local.arch}.raw.gz" + coreos_kernel = "${local.coreos_filenm}-installer-kernel-${local.arch}" + coreos_initrd = "${local.coreos_filenm}-installer-initramfs.${local.arch}.img" +} + +data "template_file" "user_data" { + template = file("${path.module}/templates/user_data_${var.operating_system}.sh") +} + +data "template_file" "ipxe_script" { + depends_on = [packet_device.lb] + for_each = toset(var.nodes) + template = file("${path.module}/templates/ipxe.tpl") + + vars = { + node_type = each.value + bastion_ip = packet_device.lb.access_public_ipv4 + ocp_version = var.ocp_version + ocp_version_zstream = var.ocp_version_zstream + } +} + +data "template_file" "ignition_append" { + depends_on = [packet_device.lb] + for_each = toset(var.nodes) + template = file("${path.module}/templates/ignition-append.json.tpl") + + vars = { + node_type = each.value + bastion_ip = packet_device.lb.access_public_ipv4 + cluster_name = var.cluster_name + cluster_basedomain = var.cluster_basedomain + } +} + +resource "packet_device" "lb" { + hostname = "lb-0.${var.cluster_name}.${var.cluster_basedomain}" + plan = var.plan + facilities = [var.facility] + operating_system = var.operating_system + billing_cycle = var.billing_cycle + project_id = var.project_id + user_data = data.template_file.user_data.rendered + +} + +resource "null_resource" "dircheck" { + + provisioner "remote-exec" { + + connection { + private_key = file(var.ssh_private_key_path) + host = packet_device.lb.access_public_ipv4 + } + + + inline = [ + "while [ ! -d /usr/share/nginx/html ]; do sleep 2; done; ls /usr/share/nginx/html/", + "while [ ! 
-f /usr/lib/systemd/system/nfs-server.service ]; do sleep 2; done; ls /usr/lib/systemd/system/nfs-server.service" + ] + } +} + +resource "null_resource" "ocp_install_ignition" { + + depends_on = [null_resource.dircheck] + + + provisioner "remote-exec" { + + connection { + private_key = file(var.ssh_private_key_path) + host = packet_device.lb.access_public_ipv4 + } + + + inline = [ + "curl -o /usr/share/nginx/html/${local.coreos_img} ${local.coreos_url}/${local.coreos_img}", + "curl -o /usr/share/nginx/html/${local.coreos_kernel} ${local.coreos_url}/${local.coreos_kernel}", + "curl -o /usr/share/nginx/html/${local.coreos_initrd} ${local.coreos_url}/${local.coreos_initrd}", + "chmod -R 0755 /usr/share/nginx/html/" + ] + } +} + +resource "null_resource" "ipxe_files" { + + depends_on = [null_resource.dircheck] + for_each = data.template_file.ipxe_script + + provisioner "file" { + + connection { + private_key = file(var.ssh_private_key_path) + host = packet_device.lb.access_public_ipv4 + } + + content = each.value.rendered + destination = "/usr/share/nginx/html/${each.key}.ipxe" + } + + provisioner "remote-exec" { + + connection { + private_key = file(var.ssh_private_key_path) + host = packet_device.lb.access_public_ipv4 + } + + + inline = [ + "chmod -R 0755 /usr/share/nginx/html/", + ] + } +} + +resource "null_resource" "ignition_append_files" { + + depends_on = [null_resource.dircheck] + for_each = data.template_file.ignition_append + + provisioner "file" { + + connection { + private_key = file(var.ssh_private_key_path) + host = packet_device.lb.access_public_ipv4 + } + + content = each.value.rendered + destination = "/usr/share/nginx/html/${each.key}-append.ign" + } + + provisioner "remote-exec" { + + connection { + private_key = file(var.ssh_private_key_path) + host = packet_device.lb.access_public_ipv4 + } + + + inline = [ + "chmod -R 0755 /usr/share/nginx/html/", + ] + } +} + +output "finished" { + depends_on = [null_resource.file_uploads, null_resource.ipxe_files] + value = "Loadbalancer provisioning finished." +} \ No newline at end of file diff --git a/data/data/packet/bootstrap/output.tf b/data/data/packet/bootstrap/output.tf index e69de29bb2d..05bf5e9c23c 100644 --- a/data/data/packet/bootstrap/output.tf +++ b/data/data/packet/bootstrap/output.tf @@ -0,0 +1,3 @@ +output "lb_ip" { + value = packet_device.lb.access_public_ipv4 +} \ No newline at end of file diff --git a/data/data/packet/bootstrap/variables.tf b/data/data/packet/bootstrap/variables.tf index e69de29bb2d..edda97b4478 100644 --- a/data/data/packet/bootstrap/variables.tf +++ b/data/data/packet/bootstrap/variables.tf @@ -0,0 +1,17 @@ +variable "depends" { + type = any + default = null +} + +variable "ssh_private_key_path" {} +variable "cluster_name" {} +variable "cluster_basedomain" {} +variable "cf_zone_id" {} +variable "ocp_version" {} +variable "ocp_version_zstream" {} +variable "nodes" { + description = "Generic list of OpenShift node types" + type = list(string) + default = ["bootstrap", "master", "worker"] +} + diff --git a/data/data/packet/dns/dns.tf b/data/data/packet/dns/dns.tf index e69de29bb2d..ba26771d194 100644 --- a/data/data/packet/dns/dns.tf +++ b/data/data/packet/dns/dns.tf @@ -0,0 +1,57 @@ +resource "cloudflare_record" "dns_a_cluster_api" { + zone_id = var.cf_zone_id + type = "A" + name = "api.${var.cluster_name}.${var.cluster_basedomain}" + value = var.node_ips[count.index] + count = (var.node_type == "lb" ? 
length(var.node_ips) : 0) +} + +resource "cloudflare_record" "dns_a_cluster_api_int" { + zone_id = var.cf_zone_id + type = "A" + name = "api-int.${var.cluster_name}.${var.cluster_basedomain}" + value = var.node_ips[count.index] + count = (var.node_type == "lb" ? length(var.node_ips) : 0) +} + +resource "cloudflare_record" "dns_a_cluster_wildcard_https" { + zone_id = var.cf_zone_id + type = "A" + name = "*.apps.${var.cluster_name}.${var.cluster_basedomain}" + value = var.node_ips[count.index] + count = (var.node_type == "lb" ? length(var.node_ips) : 0) +} + +resource "cloudflare_record" "dns_a_node" { + zone_id = var.cf_zone_id + type = "A" + name = "${var.node_type}-${count.index}.${var.cluster_name}.${var.cluster_basedomain}" + value = var.node_ips[count.index] + count = length(var.node_ips) +} + +resource "cloudflare_record" "dns_a_etcd" { + zone_id = var.cf_zone_id + type = "A" + name = "etcd-${count.index}.${var.cluster_name}.${var.cluster_basedomain}" + value = var.node_ips[count.index] + count = (var.node_type == "master" ? length(var.node_ips) : 0) +} + +resource "cloudflare_record" "dns_srv_etcd" { + zone_id = var.cf_zone_id + type = "SRV" + name = "_etcd-server-ssl._tcp" + count = (var.node_type == "master" ? length(var.node_ips) : 0) + + data = { + service = "_etcd-server-ssl" + proto = "_tcp" + name = "${var.cluster_name}.${var.cluster_basedomain}" + priority = 0 + weight = 10 + port = 2380 + target = "etcd-${count.index}.${var.cluster_name}.${var.cluster_basedomain}" + } + +} \ No newline at end of file diff --git a/data/data/packet/dns/variables.tf b/data/data/packet/dns/variables.tf index e69de29bb2d..2d5a953e3b9 100644 --- a/data/data/packet/dns/variables.tf +++ b/data/data/packet/dns/variables.tf @@ -0,0 +1,7 @@ +variable "cf_zone_id" {} +variable "node_type" {} +variable "cluster_name" {} +variable "cluster_basedomain" {} +variable "node_ips" { + type = list +} diff --git a/data/data/packet/main.tf b/data/data/packet/main.tf index b2446cc4316..de2abba5da9 100644 --- a/data/data/packet/main.tf +++ b/data/data/packet/main.tf @@ -2,6 +2,12 @@ provider "packet" { auth_token = var.packet_auth_token } +terraform { + required_providers { + packet = "~> 3.0.0" + } +} + provider "cloudflare" { email = var.packet_cf_email api_key = var.packet_cf_api_key @@ -9,7 +15,7 @@ provider "cloudflare" { module "bastion" { - source = "./modules/bastion" + source = "./modules/bootstrap" auth_token = var.auth_token project_id = var.project_id facility = var.facility @@ -27,11 +33,11 @@ module "bastion" { module "dns_lb" { source = "./modules/dns" - cluster_name = var.cluster_name - cluster_basedomain = var.cluster_basedomain - cf_zone_id = var.cf_zone_id - node_type = "lb" - node_ips = tolist([module.bastion.lb_ip]) + cluster_name = var.cluster_name + cluster_basedomain = var.cluster_basedomain + cf_zone_id = var.cf_zone_id + node_type = "lb" + node_ips = tolist([module.bastion.lb_ip]) } module "prepare_openshift" { @@ -51,7 +57,7 @@ module "prepare_openshift" { } module "openshift_install" { - source = "./modules/install" + source = "./modules/install" ssh_private_key_path = var.ssh_private_key_path operating_system = var.bastion_operating_system diff --git a/data/data/packet/master/main.tf b/data/data/packet/master/main.tf index e69de29bb2d..914bd745be1 100644 --- a/data/data/packet/master/main.tf +++ b/data/data/packet/master/main.tf @@ -0,0 +1,12 @@ +resource "packet_device" "node" { + depends_on = [var.depends] + hostname = format("%s-%01d.%s.%s", var.node_type, count.index, 
var.cluster_name, var.cluster_basedomain) + operating_system = "custom_ipxe" + ipxe_script_url = "http://${var.bastion_ip}:8080/${var.node_type}.ipxe" + plan = var.plan + facilities = [var.facility] + count = var.node_count + billing_cycle = "hourly" + project_id = var.project_id +} + diff --git a/data/data/packet/master/outputs.tf b/data/data/packet/master/outputs.tf index e69de29bb2d..964b980e475 100644 --- a/data/data/packet/master/outputs.tf +++ b/data/data/packet/master/outputs.tf @@ -0,0 +1,3 @@ +output "finished" { + value = "Provisioning node type ${var.node_type} finished." +} \ No newline at end of file diff --git a/data/data/packet/master/variables.tf b/data/data/packet/master/variables.tf index e69de29bb2d..df5514e175d 100644 --- a/data/data/packet/master/variables.tf +++ b/data/data/packet/master/variables.tf @@ -0,0 +1,14 @@ +variable "plan" {} +variable "node_count" {} +variable "facility" {} +variable "cluster_name" {} +variable "cluster_basedomain" {} +variable "ssh_private_key_path" {} +variable "project_id" {} +variable "cf_zone_id" {} +variable "bastion_ip" {} +variable "node_type" {} +variable "depends" { + type = any + default = null +} diff --git a/data/data/packet/variables-packet.tf b/data/data/packet/variables-packet.tf index 06436ccce0d..49072f73abb 100644 --- a/data/data/packet/variables-packet.tf +++ b/data/data/packet/variables-packet.tf @@ -64,22 +64,22 @@ variable "packet_count_master" { } variable "packet_count_compute" { - default = "2" + default = "2" description = "Number of Compute Nodes" } variable "packet_cluster_name" { - default = "jr" + default = "jr" description = "Cluster name label" } variable "packet_ocp_version" { - default = "4.4" + default = "4.4" description = "OpenShift minor release version" } variable "packet_ocp_version_zstream" { - default = "3" + default = "3" description = "OpenShift zstream version" } @@ -90,7 +90,7 @@ variable "packet_ocp_cluster_manager_token" { variable "packet_ocp_storage_nfs_enable" { description = "Enable configuration of NFS and NFS-related k8s provisioner/storageClass" default = true -} +} variable "packet_ocp_storage_ocs_enable" { description = "Enable installation of OpenShift Container Storage via operator. This requires a minimum of 3 worker nodes" default = false diff --git a/docs/user/customization.md b/docs/user/customization.md index 147a67dc3a5..3da5f875c86 100644 --- a/docs/user/customization.md +++ b/docs/user/customization.md @@ -54,6 +54,7 @@ The following `install-config.yaml` properties are available: * `azure` (optional object): [Azure-specific properties](azure/customization.md#cluster-scoped-properties). * `openstack` (optional object): [OpenStack-specific properties](openstack/customization.md#cluster-scoped-properties). * `ovirt` (optional object): [oVirt-specific properties](ovirt/customization.md#cluster-scoped-properties). + * `packet` (optional object): [Packet-specific properties](packet/customization.md#cluster-scoped-properties). * `vsphere` (optional object): [vSphere-specific properties](vsphere/customization.md#cluster-scoped-properties). * `proxy` (optional object): The proxy settings for the cluster. If unset, the cluster will not be configured to use a proxy. @@ -83,6 +84,7 @@ The following machine-pool properties are available: * `gcp` (optional object): [GCP-specific properties](gcp/customization.md#machine-pools). * `openstack` (optional object): [OpenStack-specific properties](openstack/customization.md#machine-pools). 
* `ovirt` (optional object): [oVirt-specific properties](ovirt/customization.md#machine-pools). + * `packet` (optional object): [Packet-specific properties](packet/customization.md#machine-pools). * `vsphere` (optional object): [vSphere-specific properties](vsphere/customization.md#machine-pools). * `replicas` (optional integer): The machine count for the machine pool. diff --git a/docs/user/packet/customization.md b/docs/user/packet/customization.md new file mode 100644 index 00000000000..6b3c89bf066 --- /dev/null +++ b/docs/user/packet/customization.md @@ -0,0 +1,5 @@ + +## Machine Pools + + +## Cluster Scoped Properties diff --git a/go.mod b/go.mod index 60b8acfe6ab..c590748e519 100644 --- a/go.mod +++ b/go.mod @@ -56,6 +56,7 @@ require ( github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200120152131-1b09fd9e7156 github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603 github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20200504092944-27473ea1ae43 + github.com/packethost/cluster-api-provider-packet v0.1.0 github.com/openshift/library-go v0.0.0-20200324092245-db2a8546af81 github.com/openshift/machine-api-operator v0.2.1-0.20200429102619-d36974451290 github.com/openshift/machine-config-operator v4.2.0-alpha.0.0.20190917115525-033375cbe820+incompatible diff --git a/go.sum b/go.sum index 12355aab51b..738fd4c614c 100644 --- a/go.sum +++ b/go.sum @@ -5,6 +5,7 @@ bou.ke/monkey v1.0.1/go.mod h1:FgHuK96Rv2Nlf+0u1OOVDpCMdsWyOFmeeketDHE7LIg= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.35.1/go.mod h1:wfjPZNvXCBYESy3fIynybskMP48KVPrjSPCnXiK7Prg= cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40= cloud.google.com/go v0.37.2/go.mod h1:H8IAquKe2L30IxoupDgqTaQvKSwF/c8prYHynGIWQbA= cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= @@ -46,6 +47,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= +contrib.go.opencensus.io/exporter/ocagent v0.2.0/go.mod h1:0fnkYHF+ORKj7HWzOExKkUHeFX79gXSKUQbpnAM+wzo= contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= @@ -171,6 +173,7 @@ github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4Rq github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 h1:uwcvnXW76Y0rHM+qs7y8iHknWUWXYFNlD6FEVhc47TU= github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd/go.mod h1:idhzw68Q7v4j+rQ2AGyq3OlZW2Jij9mdmGA4/Sk6J0E= +github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod 
h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -208,6 +211,7 @@ github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/ github.com/apparentlymart/go-versions v0.0.2-0.20180815153302-64b99f7cb171/go.mod h1:JXY95WvQrPJQtudvNARshgWajS7jNNlM90altXIPNyI= github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U= github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= +github.com/appscode/jsonpatch v2.0.0+incompatible/go.mod h1:1K4eMi6JSZcKYA1pFcujJgSTIGhqnD29dhBx347hmiw= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -296,6 +300,8 @@ github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.1.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE= @@ -485,6 +491,7 @@ github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5Jflh github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v0.0.0-20180713052910-9f541cc9db5d/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustinkirkland/golang-petname v0.0.0-20170105215008-242afa0b4f8a/go.mod h1:V+Qd57rJe8gd4eiGzZyg4h54VLHmYVVw54iMnlAMrF8= github.com/dustinkirkland/golang-petname v0.0.0-20170921220637-d3c2ba80e75e/go.mod h1:V+Qd57rJe8gd4eiGzZyg4h54VLHmYVVw54iMnlAMrF8= @@ -533,6 +540,7 @@ github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwo github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= 
+github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -684,16 +692,176 @@ github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslW github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/go-zoo/bone v1.3.0/go.mod h1:HI3Lhb7G3UQcAwEhOJ2WyNcsFtQX1WYHa0Hl4OBbhW8= +github.com/gobuffalo/buffalo v0.12.8-0.20181004233540-fac9bb505aa8/go.mod h1:sLyT7/dceRXJUxSsE813JTQtA3Eb1vjxWfo/N//vXIY= +github.com/gobuffalo/buffalo v0.13.0/go.mod h1:Mjn1Ba9wpIbpbrD+lIDMy99pQ0H0LiddMIIDGse7qT4= +github.com/gobuffalo/buffalo-plugins v1.0.2/go.mod h1:pOp/uF7X3IShFHyobahTkTLZaeUXwb0GrUTb9ngJWTs= +github.com/gobuffalo/buffalo-plugins v1.0.4/go.mod h1:pWS1vjtQ6uD17MVFWf7i3zfThrEKWlI5+PYLw/NaDB4= +github.com/gobuffalo/buffalo-plugins v1.4.3/go.mod h1:uCzTY0woez4nDMdQjkcOYKanngeUVRO2HZi7ezmAjWY= +github.com/gobuffalo/buffalo-plugins v1.5.1/go.mod h1:jbmwSZK5+PiAP9cC09VQOrGMZFCa/P0UMlIS3O12r5w= +github.com/gobuffalo/buffalo-plugins v1.6.4/go.mod h1:/+N1aophkA2jZ1ifB2O3Y9yGwu6gKOVMtUmJnbg+OZI= +github.com/gobuffalo/buffalo-plugins v1.6.5/go.mod h1:0HVkbgrVs/MnPZ/FOseDMVanCTm2RNcdM0PuXcL1NNI= +github.com/gobuffalo/buffalo-plugins v1.6.7/go.mod h1:ZGZRkzz2PiKWHs0z7QsPBOTo2EpcGRArMEym6ghKYgk= +github.com/gobuffalo/buffalo-plugins v1.6.9/go.mod h1:yYlYTrPdMCz+6/+UaXg5Jm4gN3xhsvsQ2ygVatZV5vw= +github.com/gobuffalo/buffalo-plugins v1.6.11/go.mod h1:eAA6xJIL8OuynJZ8amXjRmHND6YiusVAaJdHDN1Lu8Q= +github.com/gobuffalo/buffalo-plugins v1.8.2/go.mod h1:9te6/VjEQ7pKp7lXlDIMqzxgGpjlKoAcAANdCgoR960= +github.com/gobuffalo/buffalo-plugins v1.8.3/go.mod h1:IAWq6vjZJVXebIq2qGTLOdlXzmpyTZ5iJG5b59fza5U= +github.com/gobuffalo/buffalo-plugins v1.9.4/go.mod h1:grCV6DGsQlVzQwk6XdgcL3ZPgLm9BVxlBmXPMF8oBHI= +github.com/gobuffalo/buffalo-plugins v1.10.0/go.mod h1:4osg8d9s60txLuGwXnqH+RCjPHj9K466cDFRl3PErHI= +github.com/gobuffalo/buffalo-pop v1.0.5/go.mod h1:Fw/LfFDnSmB/vvQXPvcXEjzP98Tc+AudyNWUBWKCwQ8= +github.com/gobuffalo/envy v1.6.4/go.mod h1:Abh+Jfw475/NWtYMEt+hnJWRiC8INKWibIMyNt1w2Mc= github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.6/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.7/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.8/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.9/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.10/go.mod h1:X0CFllQjTV5ogsnUrg+Oks2yTI+PU2dGYBJOEI2D1Uo= +github.com/gobuffalo/envy v1.6.11/go.mod h1:Fiq52W7nrHGDggFPhn2ZCcHw4u/rqXkqo+i7FB6EAcg= +github.com/gobuffalo/envy v1.6.12/go.mod h1:qJNrJhKkZpEW0glh5xP2syQHH5kgdmgsKss2Kk8PTP0= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= +github.com/gobuffalo/events v1.0.3/go.mod h1:Txo8WmqScapa7zimEQIwgiJBvMECMe9gJjsKNPN3uZw= +github.com/gobuffalo/events v1.0.7/go.mod h1:z8txf6H9jWhQ5Scr7YPLWg/cgXBRj8Q4uYI+rsVCCSQ= 
+github.com/gobuffalo/events v1.0.8/go.mod h1:A5KyqT1sA+3GJiBE4QKZibse9mtOcI9nw8gGrDdqYGs= +github.com/gobuffalo/events v1.1.3/go.mod h1:9yPGWYv11GENtzrIRApwQRMYSbUgCsZ1w6R503fCfrk= +github.com/gobuffalo/events v1.1.4/go.mod h1:09/YRRgZHEOts5Isov+g9X2xajxdvOAcUuAHIX/O//A= +github.com/gobuffalo/events v1.1.5/go.mod h1:3YUSzgHfYctSjEjLCWbkXP6djH2M+MLaVRzb4ymbAK0= +github.com/gobuffalo/events v1.1.7/go.mod h1:6fGqxH2ing5XMb3EYRq9LEkVlyPGs4oO/eLzh+S8CxY= +github.com/gobuffalo/events v1.1.8/go.mod h1:UFy+W6X6VbCWS8k2iT81HYX65dMtiuVycMy04cplt/8= +github.com/gobuffalo/events v1.1.9/go.mod h1:/0nf8lMtP5TkgNbzYxR6Bl4GzBy5s5TebgNTdRfRbPM= +github.com/gobuffalo/fizz v1.0.12/go.mod h1:C0sltPxpYK8Ftvf64kbsQa2yiCZY4RZviurNxXdAKwc= +github.com/gobuffalo/flect v0.0.0-20180907193754-dc14d8acaf9f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181002182613-4571df4b1daf/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181007231023-ae7ed6bfe683/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181018182602-fd24a256709f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181019110701-3d6f0b585514/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181024204909-8f6be1a8c6c2/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181104133451-1f6e9779237a/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181114183036-47375f6d8328/go.mod h1:0HvNbHdfh+WOvDSIASqJOSxTOWSxCCUF++k/Y53v9rI= +github.com/gobuffalo/flect v0.0.0-20181210151238-24a2b68e0316/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= +github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gobuffalo/flect v0.2.0 h1:EWCvMGGxOjsgwlWaP+f4+Hh6yrrte7JeFL2S6b+0hdM= github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/gobuffalo/genny v0.0.0-20180924032338-7af3a40f2252/go.mod h1:tUTQOogrr7tAQnhajMSH6rv1BVev34H2sa1xNHMy94g= +github.com/gobuffalo/genny v0.0.0-20181003150629-3786a0744c5d/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= +github.com/gobuffalo/genny v0.0.0-20181005145118-318a41a134cc/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= +github.com/gobuffalo/genny v0.0.0-20181007153042-b8de7d566757/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181012161047-33e5f43d83a6/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181017160347-90a774534246/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181024195656-51392254bf53/go.mod h1:o9GEH5gn5sCKLVB5rHFC4tq40rQ3VRUzmx6WwmaqISE= +github.com/gobuffalo/genny v0.0.0-20181025145300-af3f81d526b8/go.mod h1:uZ1fFYvdcP8mu0B/Ynarf6dsGvp7QFIpk/QACUuFUVI= +github.com/gobuffalo/genny v0.0.0-20181027191429-94d6cfb5c7fc/go.mod h1:x7SkrQQBx204Y+O9EwRXeszLJDTaWN0GnEasxgLrQTA= +github.com/gobuffalo/genny v0.0.0-20181027195209-3887b7171c4f/go.mod h1:JbKx8HSWICu5zyqWOa0dVV1pbbXOHusrSzQUprW6g+w= +github.com/gobuffalo/genny v0.0.0-20181106193839-7dcb0924caf1/go.mod h1:x61yHxvbDCgQ/7cOAbJCacZQuHgB0KMSzoYcw5debjU= +github.com/gobuffalo/genny v0.0.0-20181107223128-f18346459dbe/go.mod 
h1:utQD3aKKEsdb03oR+Vi/6ztQb1j7pO10N3OBoowRcSU= +github.com/gobuffalo/genny v0.0.0-20181114215459-0a4decd77f5d/go.mod h1:kN2KZ8VgXF9VIIOj/GM0Eo7YK+un4Q3tTreKOf0q1ng= +github.com/gobuffalo/genny v0.0.0-20181119162812-e8ff4adce8bb/go.mod h1:BA9htSe4bZwBDJLe8CUkoqkypq3hn3+CkoHqVOW718E= +github.com/gobuffalo/genny v0.0.0-20181127225641-2d959acc795b/go.mod h1:l54xLXNkteX/PdZ+HlgPk1qtcrgeOr3XUBBPDbH+7CQ= +github.com/gobuffalo/genny v0.0.0-20181128191930-77e34f71ba2a/go.mod h1:FW/D9p7cEEOqxYA71/hnrkOWm62JZ5ZNxcNIVJEaWBU= +github.com/gobuffalo/genny v0.0.0-20181203165245-fda8bcce96b1/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181203201232-849d2c9534ea/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181206121324-d6fb8a0dbe36/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181207164119-84844398a37d/go.mod h1:y0ysCHGGQf2T3vOhCrGHheYN54Y/REj0ayd0Suf4C/8= +github.com/gobuffalo/genny v0.0.0-20181211165820-e26c8466f14d/go.mod h1:sHnK+ZSU4e2feXP3PA29ouij6PUEiN+RCwECjCTB3yM= +github.com/gobuffalo/genny v0.0.0-20190104222617-a71664fc38e7/go.mod h1:QPsQ1FnhEsiU8f+O0qKWXz2RE4TiDqLVChWkBuh1WaY= +github.com/gobuffalo/genny v0.0.0-20190112155932-f31a84fcacf5/go.mod h1:CIaHCrSIuJ4il6ka3Hub4DR4adDrGoXGEEt2FbBxoIo= +github.com/gobuffalo/github_flavored_markdown v1.0.4/go.mod h1:uRowCdK+q8d/RF0Kt3/DSalaIXbb0De/dmTqMQdkQ4I= +github.com/gobuffalo/github_flavored_markdown v1.0.5/go.mod h1:U0643QShPF+OF2tJvYNiYDLDGDuQmJZXsf/bHOJPsMY= +github.com/gobuffalo/github_flavored_markdown v1.0.7/go.mod h1:w93Pd9Lz6LvyQXEG6DktTPHkOtCbr+arAD5mkwMzXLI= +github.com/gobuffalo/httptest v1.0.2/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E= +github.com/gobuffalo/licenser v0.0.0-20180924033006-eae28e638a42/go.mod h1:Ubo90Np8gpsSZqNScZZkVXXAo5DGhTb+WYFIjlnog8w= +github.com/gobuffalo/licenser v0.0.0-20181025145548-437d89de4f75/go.mod h1:x3lEpYxkRG/XtGCUNkio+6RZ/dlOvLzTI9M1auIwFcw= +github.com/gobuffalo/licenser v0.0.0-20181027200154-58051a75da95/go.mod h1:BzhaaxGd1tq1+OLKObzgdCV9kqVhbTulxOpYbvMQWS0= +github.com/gobuffalo/licenser v0.0.0-20181109171355-91a2a7aac9a7/go.mod h1:m+Ygox92pi9bdg+gVaycvqE8RVSjZp7mWw75+K5NPHk= +github.com/gobuffalo/licenser v0.0.0-20181128165715-cc7305f8abed/go.mod h1:oU9F9UCE+AzI/MueCKZamsezGOOHfSirltllOVeRTAE= +github.com/gobuffalo/licenser v0.0.0-20181203160806-fe900bbede07/go.mod h1:ph6VDNvOzt1CdfaWC+9XwcBnlSTBz2j49PBwum6RFaU= +github.com/gobuffalo/licenser v0.0.0-20181211173111-f8a311c51159/go.mod h1:ve/Ue99DRuvnTaLq2zKa6F4KtHiYf7W046tDjuGYPfM= +github.com/gobuffalo/logger v0.0.0-20181022175615-46cfb361fc27/go.mod h1:8sQkgyhWipz1mIctHF4jTxmJh1Vxhp7mP8IqbljgJZo= +github.com/gobuffalo/logger v0.0.0-20181027144941-73d08d2bb969/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= +github.com/gobuffalo/logger v0.0.0-20181027193913-9cf4dd0efe46/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= +github.com/gobuffalo/logger v0.0.0-20181109185836-3feeab578c17/go.mod h1:oNErH0xLe+utO+OW8ptXMSA5DkiSEDW1u3zGIt8F9Ew= +github.com/gobuffalo/logger v0.0.0-20181117211126-8e9b89b7c264/go.mod h1:5etB91IE0uBlw9k756fVKZJdS+7M7ejVhmpXXiSFj0I= +github.com/gobuffalo/logger v0.0.0-20181127160119-5b956e21995c/go.mod h1:+HxKANrR9VGw9yN3aOAppJKvhO05ctDi63w4mDnKv2U= github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= +github.com/gobuffalo/makr v1.1.5/go.mod h1:Y+o0btAH1kYAMDJW/TX3+oAXEu0bmSLLoC9mIFxtzOw= +github.com/gobuffalo/mapi v1.0.0/go.mod 
h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/meta v0.0.0-20181018155829-df62557efcd3/go.mod h1:XTTOhwMNryif3x9LkTTBO/Llrveezd71u3quLd0u7CM= +github.com/gobuffalo/meta v0.0.0-20181018192820-8c6cef77dab3/go.mod h1:E94EPzx9NERGCY69UWlcj6Hipf2uK/vnfrF4QD0plVE= +github.com/gobuffalo/meta v0.0.0-20181025145500-3a985a084b0a/go.mod h1:YDAKBud2FP7NZdruCSlmTmDOZbVSa6bpK7LJ/A/nlKg= +github.com/gobuffalo/meta v0.0.0-20181114191255-b130ebedd2f7/go.mod h1:K6cRZ29ozr4Btvsqkjvg5nDFTLOgTqf03KA70Ks0ypE= +github.com/gobuffalo/meta v0.0.0-20181127070345-0d7e59dd540b/go.mod h1:RLO7tMvE0IAKAM8wny1aN12pvEKn7EtkBLkUZR00Qf8= +github.com/gobuffalo/mw-basicauth v1.0.3/go.mod h1:dg7+ilMZOKnQFHDefUzUHufNyTswVUviCBgF244C1+0= +github.com/gobuffalo/mw-contenttype v0.0.0-20180802152300-74f5a47f4d56/go.mod h1:7EvcmzBbeCvFtQm5GqF9ys6QnCxz2UM1x0moiWLq1No= +github.com/gobuffalo/mw-csrf v0.0.0-20180802151833-446ff26e108b/go.mod h1:sbGtb8DmDZuDUQoxjr8hG1ZbLtZboD9xsn6p77ppcHo= +github.com/gobuffalo/mw-forcessl v0.0.0-20180802152810-73921ae7a130/go.mod h1:JvNHRj7bYNAMUr/5XMkZaDcw3jZhUZpsmzhd//FFWmQ= +github.com/gobuffalo/mw-i18n v0.0.0-20180802152014-e3060b7e13d6/go.mod h1:91AQfukc52A6hdfIfkxzyr+kpVYDodgAeT5cjX1UIj4= +github.com/gobuffalo/mw-paramlogger v0.0.0-20181005191442-d6ee392ec72e/go.mod h1:6OJr6VwSzgJMqWMj7TYmRUqzNe2LXu/W1rRW4MAz/ME= +github.com/gobuffalo/mw-tokenauth v0.0.0-20181001105134-8545f626c189/go.mod h1:UqBF00IfKvd39ni5+yI5MLMjAf4gX7cDKN/26zDOD6c= +github.com/gobuffalo/packd v0.0.0-20181027182251-01ad393492c8/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181027190505-aafc0d02c411/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181027194105-7ae579e6d213/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181031195726-c82734870264/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181104210303-d376b15f8e96/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181111195323-b2e760a5f0ff/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181114190715-f25c5d2471d7/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181124090624-311c6248e5fb/go.mod h1:Foenia9ZvITEvG05ab6XpiD5EfBHPL8A6hush8SJ0o8= +github.com/gobuffalo/packd v0.0.0-20181207120301-c49825f8f6f4/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= +github.com/gobuffalo/packd v0.0.0-20181212173646-eca3b8fd6687/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= +github.com/gobuffalo/packr v1.13.7/go.mod h1:KkinLIn/n6+3tVXMwg6KkNvWwVsrRAz4ph+jgpk3Z24= +github.com/gobuffalo/packr v1.15.0/go.mod h1:t5gXzEhIviQwVlNx/+3SfS07GS+cZ2hn76WLzPp6MGI= +github.com/gobuffalo/packr v1.15.1/go.mod h1:IeqicJ7jm8182yrVmNbM6PR4g79SjN9tZLH8KduZZwE= +github.com/gobuffalo/packr v1.19.0/go.mod h1:MstrNkfCQhd5o+Ct4IJ0skWlxN8emOq8DsoT1G98VIU= +github.com/gobuffalo/packr v1.20.0/go.mod h1:JDytk1t2gP+my1ig7iI4NcVaXr886+N0ecUga6884zw= +github.com/gobuffalo/packr v1.21.0/go.mod h1:H00jGfj1qFKxscFJSw8wcL4hpQtPe1PfU2wa6sg/SR0= github.com/gobuffalo/packr v1.30.1/go.mod h1:ljMyFO2EcrnzsHsN99cvbq055Y9OhRrIaviy289eRuk= +github.com/gobuffalo/packr/v2 v2.0.0-rc.8/go.mod 
h1:y60QCdzwuMwO2R49fdQhsjCPv7tLQFR0ayzxxla9zes= +github.com/gobuffalo/packr/v2 v2.0.0-rc.9/go.mod h1:fQqADRfZpEsgkc7c/K7aMew3n4aF1Kji7+lIZeR98Fc= +github.com/gobuffalo/packr/v2 v2.0.0-rc.10/go.mod h1:4CWWn4I5T3v4c1OsJ55HbHlUEKNWMITG5iIkdr4Px4w= +github.com/gobuffalo/packr/v2 v2.0.0-rc.11/go.mod h1:JoieH/3h3U4UmatmV93QmqyPUdf4wVM9HELaHEu+3fk= +github.com/gobuffalo/packr/v2 v2.0.0-rc.12/go.mod h1:FV1zZTsVFi1DSCboO36Xgs4pzCZBjB/tDV9Cz/lSaR8= +github.com/gobuffalo/packr/v2 v2.0.0-rc.13/go.mod h1:2Mp7GhBFMdJlOK8vGfl7SYtfMP3+5roE39ejlfjw0rA= +github.com/gobuffalo/packr/v2 v2.0.0-rc.14/go.mod h1:06otbrNvDKO1eNQ3b8hst+1010UooI2MFg+B2Ze4MV8= github.com/gobuffalo/packr/v2 v2.5.1/go.mod h1:8f9c96ITobJlPzI44jj+4tHnEKNt0xXWSVlXRN9X1Iw= +github.com/gobuffalo/plush v3.7.16+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.20+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.21+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.22+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.23+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.30+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.31+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plush v3.7.32+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= +github.com/gobuffalo/plushgen v0.0.0-20181128164830-d29dcb966cb2/go.mod h1:r9QwptTFnuvSaSRjpSp4S2/4e2D3tJhARYbvEBcKSb4= +github.com/gobuffalo/plushgen v0.0.0-20181203163832-9fc4964505c2/go.mod h1:opEdT33AA2HdrIwK1aibqnTJDVVKXC02Bar/GT1YRVs= +github.com/gobuffalo/plushgen v0.0.0-20181207152837-eedb135bd51b/go.mod h1:Lcw7HQbEVm09sAQrCLzIxuhFbB3nAgp4c55E+UlynR0= +github.com/gobuffalo/plushgen v0.0.0-20190104222512-177cd2b872b3/go.mod h1:tYxCozi8X62bpZyKXYHw1ncx2ZtT2nFvG42kuLwYjoc= +github.com/gobuffalo/pop v4.8.2+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/pop v4.8.3+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/pop v4.8.4+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= +github.com/gobuffalo/release v1.0.35/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= +github.com/gobuffalo/release v1.0.38/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= +github.com/gobuffalo/release v1.0.42/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= +github.com/gobuffalo/release v1.0.52/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= +github.com/gobuffalo/release v1.0.53/go.mod h1:FdF257nd8rqhNaqtDWFGhxdJ/Ig4J7VcS3KL7n/a+aA= +github.com/gobuffalo/release v1.0.54/go.mod h1:Pe5/RxRa/BE8whDpGfRqSI7D1a0evGK1T4JDm339tJc= +github.com/gobuffalo/release v1.0.61/go.mod h1:mfIO38ujUNVDlBziIYqXquYfBF+8FDHUjKZgYC1Hj24= +github.com/gobuffalo/release v1.0.72/go.mod h1:NP5NXgg/IX3M5XmHmWR99D687/3Dt9qZtTK/Lbwc1hU= +github.com/gobuffalo/release v1.1.1/go.mod h1:Sluak1Xd6kcp6snkluR1jeXAogdJZpFFRzTYRs/2uwg= +github.com/gobuffalo/release v1.1.3/go.mod h1:CuXc5/m+4zuq8idoDt1l4va0AXAn/OSs08uHOfMVr8E= +github.com/gobuffalo/release v1.1.6/go.mod h1:18naWa3kBsqO0cItXZNJuefCKOENpbbUIqRL1g+p6z0= +github.com/gobuffalo/shoulders v1.0.1/go.mod h1:V33CcVmaQ4gRUmHKwq1fiTXuf8Gp/qjQBUL5tHPmvbA= +github.com/gobuffalo/syncx v0.0.0-20181120191700-98333ab04150/go.mod 
h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobuffalo/syncx v0.0.0-20181120194010-558ac7de985f/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobuffalo/tags v2.0.11+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= +github.com/gobuffalo/tags v2.0.14+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= +github.com/gobuffalo/tags v2.0.15+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= +github.com/gobuffalo/uuid v2.0.3+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/uuid v2.0.4+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/uuid v2.0.5+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= +github.com/gobuffalo/validate v2.0.3+incompatible/go.mod h1:N+EtDe0J8252BgfzQUChBgfd6L93m9weay53EWFVsMM= +github.com/gobuffalo/x v0.0.0-20181003152136-452098b06085/go.mod h1:WevpGD+5YOreDJznWevcn8NTmQEW5STSBgIkpkjzqXc= +github.com/gobuffalo/x v0.0.0-20181007152206-913e47c59ca7/go.mod h1:9rDPXaB3kXdKWzMc4odGQQdG2e2DIEmANy5aSJ9yesY= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= github.com/gocql/gocql v0.0.0-20190402132108-0e1d5de854df/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= @@ -704,6 +872,7 @@ github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v3.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -835,6 +1004,7 @@ github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJ github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/gookit/color v1.1.7/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= github.com/gophercloud/gophercloud v0.0.0-20190212181753-892256c46858/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.0.0-20190221164956-3f3cc5a566b2/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.0.0-20190509032623-7892efa714f1/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= @@ -865,6 +1035,10 @@ github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= +github.com/gorilla/securecookie v1.1.1/go.mod 
h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.1.2/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= +github.com/gorilla/sessions v1.1.3/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -873,6 +1047,7 @@ github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7 github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -1100,10 +1275,12 @@ github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/joho/godotenv v1.2.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -1135,6 +1312,8 @@ github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/karrick/godirwalk v1.7.7/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/karrick/godirwalk v1.7.8/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/katbyte/terrafmt v0.2.1-0.20200303174203-e6a3e82cb21b/go.mod 
h1:WRq5tDmK04tcYbEr400zAUWtOK0jix54e8YeHP3IoQg= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= @@ -1154,6 +1333,7 @@ github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM52 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1220,7 +1400,24 @@ github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/maorfr/helm-plugin-utils v0.0.0-20200216074820-36d2fcf6ae86/go.mod h1:p3gwmRSFqbWw6plBpR0sKl3n3vpu8kX70gvCJKMvvCA= github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= +github.com/markbates/deplist v1.0.4/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= +github.com/markbates/deplist v1.0.5/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= +github.com/markbates/going v1.0.2/go.mod h1:UWCk3zm0UKefHZ7l8BNqi26UyiEMniznk8naLdTcy6c= +github.com/markbates/grift v1.0.4/go.mod h1:wbmtW74veyx+cgfwFhlnnMWqhoz55rnHR47oMXzsyVs= +github.com/markbates/hmax v1.0.0/go.mod h1:cOkR9dktiESxIMu+65oc/r/bdY4bE8zZw3OLhLx0X2c= +github.com/markbates/inflect v1.0.0/go.mod h1:oTeZL2KHA7CUX6X+fovmK9OvIOFuqu0TwdQrZjLTh88= +github.com/markbates/inflect v1.0.1/go.mod h1:uv3UVNBe5qBIfCm8O8Q+DW+S1EopeyINj+Ikhc7rnCk= +github.com/markbates/inflect v1.0.3/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= +github.com/markbates/oncer v0.0.0-20180924031910-e862a676800b/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20180924034138-723ad0170a46/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20181014194634-05fccaae8fc4/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/refresh v1.4.10/go.mod h1:NDPHvotuZmTmesXxr95C9bjlw1/0frJwtME2dzcVKhc= +github.com/markbates/safe v1.0.0/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/markbates/sigtx v1.0.0/go.mod h1:QF1Hv6Ic6Ca6W+T+DL0Y/ypborFKyvUY9HmuCD4VeTc= +github.com/markbates/willie v1.0.9/go.mod h1:fsrFVWl91+gXpx/6dv715j7i11fYPfZ9ZGfH0DQzY7w= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= @@ -1271,6 +1468,7 @@ github.com/mholt/archiver v3.1.1+incompatible/go.mod 
h1:Dh2dOXnSdiLxRiPoVfIr/fI1 github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5/go.mod h1:+pmbihVqjC3GPdfWv1V2TnRSuVvwrWLKfEP/MZVB/Wc= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -1330,6 +1528,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/monoculum/formam v0.0.0-20180901015400-4e68be1d79ba/go.mod h1:RKgILGEJq24YyJ2ban8EO0RUVSJlF1pGsEvoLEACr/Q= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= @@ -1358,6 +1557,7 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96d github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ= @@ -1514,6 +1714,9 @@ github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOTh github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db h1:9uViuKtx1jrlXLBW/pMnhOfzn3iSEdLase/But/IZRU= github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= +github.com/packethost/cluster-api-provider-packet v0.1.0 h1:JYnmudrCvQZkSUDn4Luyp/VL6yG4w8c82WPgOjGoY5w= +github.com/packethost/cluster-api-provider-packet v0.1.0/go.mod h1:kgGGkDEiQOONoDD+ww6Wgatbz1BMmNQJhQgcqi6/L9M= +github.com/packethost/packngo v0.0.0-20190507131943-1343be729ca2/go.mod h1:RQHg5xR1F614BwJyepfMqrKN+32IH0i7yX+ey43rEeQ= github.com/packethost/packngo v0.2.1-0.20200424110205-36917dbc292f h1:9JqXevAco/bD2ldBRC33NfFIDRZ3Xn1n40M/LS7/vDw= github.com/packethost/packngo v0.2.1-0.20200424110205-36917dbc292f/go.mod h1:erURcsqYzwc9wSb04TX4so+s6F3uZtbXUil0W1LCGHA= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -1629,6 +1832,7 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= +github.com/rogpeppe/go-internal v1.0.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= @@ -1665,6 +1869,7 @@ github.com/seccomp/libseccomp-golang v0.9.0/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvW github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do= github.com/securego/gosec v0.0.0-20200316084457-7da9f46445fd/go.mod h1:NurAFZsWJAEZjogSwdVPlHkOZB3DOAU7gsPP8VFZCHc= +github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -1681,6 +1886,7 @@ github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOms github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20170515013102-78fb10f4a5f8/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= @@ -1692,6 +1898,7 @@ github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919Lwc github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20180602230221-c42b0e3b24d9/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -1705,6 
+1912,8 @@ github.com/sirupsen/logrus v0.0.0-20190403091019-9b3cdde74fbe/go.mod h1:ni0Sbl8b github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.1.0/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= +github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= @@ -1728,6 +1937,7 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.0/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= @@ -1749,6 +1959,8 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/spf13/viper v1.2.1/go.mod h1:P4AexN0a+C9tGAnUFNwDMYYZv3pjFuvmeiMyKRaNVlI= +github.com/spf13/viper v1.3.1/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= @@ -1831,6 +2043,8 @@ github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4A github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= +github.com/unrolled/secure v0.0.0-20180918153822-f340ee86eb8b/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= +github.com/unrolled/secure v0.0.0-20181005190816-ff9db2ff917f/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.21.0/go.mod h1:lxDj6qX9Q6lWQxIrbrT0nwecwUtRnhVZAJjJZrVUZZQ= @@ -1917,6 +2131,7 @@ go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod 
h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= @@ -1955,11 +2170,21 @@ golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181024171144-74cb1d3d52f4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025113841-85e1b3f9139a/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190102171810-8d7daa0c54b3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -2041,11 +2266,15 @@ golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180816102801-aaf60122140d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181017193950-04a2e542c03f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2053,6 +2282,7 @@ golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2099,6 +2329,7 @@ golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAG golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190319182350-c85d3e98c914/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2120,17 +2351,29 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180824143301-4910a1d54f87/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180906133057-8cf3aee42992/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180921163948-d47a0f339242/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180927150500-dad3d9fb7b6e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181005133103-4497e2df6f9e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181011152604-fa43e7bc11ba/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181022134430-8a28ead16f52/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181024145615-5cd93ef61a7c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181025063200-d989b31c8746/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026064943-731415f00dce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181106135930-3a76605856fd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190122071731-054c452bb702/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2219,16 +2462,38 @@ golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181003024731-2f84ea8ef872/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181006002542-f60d9635b16a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181013182035-5e66757b835f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181017214349-06f26fdaaa28/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181024171208-a2dc47679d30/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181026183834-f60e5f99f081/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181105230042-78dc5bac0cac/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181107215632-34b416bd17b3/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181114190951-94339b83286c/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181119130350-139d099f6620/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181127195227-b4e97c0ed882/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181127232545-e782529d0ddd/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181203210056-e5f3ab76ea4b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181205224935-3576414c54a4/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181206194817-bcd4e47d0288/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181207183836-8bc39b988060/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181212172921-837e80568c09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190102213336-ca9055ed7d04/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190104182027-498d95493402/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190111214448-fc1d57b08d7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190118193359-16909d206f00/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190122202912-9c309ee22fab/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190221204921-83362c3779f5/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -2358,7 +2623,9 @@ google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod 
h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190122154452-ba6ebe99b011/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= +google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -2391,8 +2658,10 @@ google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfG google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -2414,6 +2683,7 @@ gopkg.in/AlecAivazis/survey.v1 v1.8.9-0.20200217094205-6773bdf39b7f h1:AQkMzsSzH gopkg.in/AlecAivazis/survey.v1 v1.8.9-0.20200217094205-6773bdf39b7f/go.mod h1:CaHjv79TCgAvXMSFJSVgonHXYWxnhzI3eoHtnX5UgUo= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2431,6 +2701,7 @@ gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1 gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= gopkg.in/imdario/mergo.v0 v0.3.7/go.mod h1:9qPP6AGrlC1G2PTNXko614FwGZvorN7MiBU0Eppok+U= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -2439,6 +2710,7 @@ gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= +gopkg.in/mail.v2 v2.0.0-20180731213649-a0242b2233b4/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= @@ -2501,6 +2773,7 @@ k8s.io/code-generator v0.17.1/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+ k8s.io/component-base v0.17.1/go.mod h1:LrBPZkXtlvGjBzDJa0+b7E5Ij4VoAAKrOGudRC5z2eY= k8s.io/cri-api v0.17.1/go.mod h1:BzAkbBHHp81d+aXzbiIcUbilLkbXa40B8mUHOk6EX3s= k8s.io/csi-translation-lib v0.17.1/go.mod h1:EWeHQJcexqar6avuUocMwEJOYkboteNM9ODXa3qoamc= +k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -2510,6 +2783,7 @@ k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949Koz k8s.io/helm v2.16.3+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.2/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= @@ -2563,7 +2837,9 @@ rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/cluster-api v0.1.7/go.mod h1:MTxMmx3MSFZNi0RAjj64X+RurP9EEdhrRwLzdFZKTYw= sigs.k8s.io/controller-runtime v0.0.0-20190520212815-96b67f231945/go.mod h1:TSH2R0nSz4WAlUUlNnOFcOR/VUhfwBLlmtq2X6AiQCA= +sigs.k8s.io/controller-runtime v0.1.12/go.mod h1:HFAYoOh6XMV+jKF1UjFwrknPbowfyHEHHRdJMf2jMX8= sigs.k8s.io/controller-runtime v0.2.0-beta.2/go.mod h1:TSH2R0nSz4WAlUUlNnOFcOR/VUhfwBLlmtq2X6AiQCA= sigs.k8s.io/controller-runtime v0.2.0/go.mod h1:ZHqrRDZi3f6BzONcvlUxkqCKgwasGk5FZrnSv9TVZF4= sigs.k8s.io/controller-runtime v0.3.1-0.20191016212439-2df793d02076/go.mod h1:p2vzQ3RuSVv9YR4AcM0y8TKHQA+0oLXazKFt6Z0OdS8= @@ -2571,6 +2847,7 @@ sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZw sigs.k8s.io/controller-runtime v0.5.1-0.20200330174416-a11a908d91e0/go.mod h1:j4echH3Y/UPHRpXS65rxGXujda8iWOheMQvDh1uNgaY= sigs.k8s.io/controller-runtime v0.5.2 h1:pyXbUfoTo+HA3jeIfr0vgi+1WtmNh0CwlcnQGLXwsSw= sigs.k8s.io/controller-runtime v0.5.2/go.mod h1:JZUwSMVbxDupo0lTJSSFP5pimEyxGynROImSsqIOx1A= +sigs.k8s.io/controller-tools v0.1.11/go.mod h1:6g08p9m9G/So3sBc1AOQifHfhxH/mb6Sc4z0LMI8XMw= sigs.k8s.io/controller-tools v0.2.2-0.20190919191502-76a25b63325a/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70= sigs.k8s.io/controller-tools 
v0.2.2-0.20190930215132-4752ed2de7d2/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70= sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= diff --git a/pkg/asset/cluster/tfvars.go b/pkg/asset/cluster/tfvars.go index d8b6551e782..4fb5296ca81 100644 --- a/pkg/asset/cluster/tfvars.go +++ b/pkg/asset/cluster/tfvars.go @@ -13,6 +13,7 @@ import ( libvirtprovider "github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1beta1" ovirtprovider "github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1" vsphereprovider "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1" + packetprovider "github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1" "github.com/pkg/errors" "github.com/sirupsen/logrus" awsprovider "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1" @@ -30,6 +31,7 @@ import ( gcpconfig "github.com/openshift/installer/pkg/asset/installconfig/gcp" openstackconfig "github.com/openshift/installer/pkg/asset/installconfig/openstack" ovirtconfig "github.com/openshift/installer/pkg/asset/installconfig/ovirt" + packetconfig "github.com/openshift/installer/pkg/asset/installconfig/packet" "github.com/openshift/installer/pkg/asset/machines" "github.com/openshift/installer/pkg/asset/openshiftinstall" "github.com/openshift/installer/pkg/asset/rhcos" @@ -488,11 +490,11 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { Data: data, }) case packet.Name: + config, err := packetconfig.NewConfig() + if err != nil { + return err + } /* - config, err := packetconfig.NewConfig() - if err != nil { - return err - } con, err := packetconfig.NewConnection() if err != nil { return err @@ -500,14 +502,18 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { */ // TODO(displague) Packet networking - /* - masters, err := mastersAsset.Machines() - if err != nil { - return err - } - */ + masters, err := mastersAsset.Machines() + if err != nil { + return err + } - data, err := packettfvars.TFVars(packettfvars.TFVarsSources{}) + data, err := packettfvars.TFVars(packettfvars.TFVarsSources{ + ControlPlaneConfigs: []*packetprovider.PacketMachineProviderSpec{ + masters[0].Spec.ProviderSpec.Value.Object.(*packetprovider.PacketMachineProviderSpec), + }, + APIURL: config.APIURL, + APIKey: config.APIKey, + }) if err != nil { return errors.Wrapf(err, "failed to get %s Terraform variables", platform) } diff --git a/pkg/asset/rhcos/image.go b/pkg/asset/rhcos/image.go index 56dbbaa0fa4..e0f38daed73 100644 --- a/pkg/asset/rhcos/image.go +++ b/pkg/asset/rhcos/image.go @@ -23,6 +23,7 @@ import ( "github.com/openshift/installer/pkg/types/none" "github.com/openshift/installer/pkg/types/openstack" "github.com/openshift/installer/pkg/types/ovirt" + "github.com/openshift/installer/pkg/types/packet" "github.com/openshift/installer/pkg/types/vsphere" ) @@ -97,6 +98,8 @@ func osImage(config *types.InstallConfig) (string, error) { osimage, err = rhcos.OpenStack(ctx, arch) case ovirt.Name: osimage, err = rhcos.OpenStack(ctx, arch) + case packet.Name: + osimage, err = rhcos.OpenStack(ctx, arch) case azure.Name: osimage, err = rhcos.VHD(ctx, arch) case baremetal.Name: diff --git a/pkg/tfvars/packet/packet.go b/pkg/tfvars/packet/packet.go index 81d8c0704e5..b31a97ec249 100644 --- a/pkg/tfvars/packet/packet.go +++ b/pkg/tfvars/packet/packet.go @@ -1,13 +1,46 @@ package packet -import "encoding/json" +import ( + "encoding/json" + + packetprovider 
"github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1" +) + +type config struct { + Roles []string `json:"packet_roles,omitempty"` + Facility []string `json:"packet_facility,omitempty"` + OS string `json:"packet_os"` + ProjectID string `json:"packet_project_id"` + BillingCycle string `json:"packet_billing_cycle"` + MachineType string `json:"packet_machine_type"` + SshKeys []string `json:"packet_ssh_keys,omitempty"` +} // TFVarsSources contains the parameters to be converted into Terraform variables type TFVarsSources struct { + ControlPlaneConfigs []*packetprovider.PacketMachineProviderSpec + APIURL string + APIKey string } //TFVars generate Packet-specific Terraform variables func TFVars(sources TFVarsSources) ([]byte, error) { + plane0 := sources.ControlPlaneConfigs[0] + + roles := make([]string, len(plane0.Roles)) + for _, r := range plane0.Roles { + roles = append(roles, string(r)) + } // TODO(displague) fill in the tf vars - return json.MarshalIndent(struct{}{}, "", " ") + cfg := &config{ + Roles: roles, + Facility: plane0.Facility, + OS: plane0.OS, + ProjectID: plane0.ProjectID, + BillingCycle: plane0.BillingCycle, + MachineType: plane0.MachineType, + SshKeys: plane0.SshKeys, + } + + return json.MarshalIndent(cfg, "", " ") } diff --git a/vendor/github.com/packethost/cluster-api-provider-packet/LICENSE b/vendor/github.com/packethost/cluster-api-provider-packet/LICENSE new file mode 100644 index 00000000000..89ca9198dcc --- /dev/null +++ b/vendor/github.com/packethost/cluster-api-provider-packet/LICENSE @@ -0,0 +1,14 @@ +Copyright 2019 Packet Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + diff --git a/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/doc.go b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/doc.go new file mode 100644 index 00000000000..42dca3bbfa3 --- /dev/null +++ b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 Packet Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1alpha1 contains API Schema definitions for the packet v1alpha1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider +// +k8s:defaulter-gen=TypeMeta +// +groupName=packetprovider.k8s.io +package v1alpha1 diff --git a/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetclusterproviderspec_types.go b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetclusterproviderspec_types.go new file mode 100644 index 00000000000..a1479c440ae --- /dev/null +++ b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetclusterproviderspec_types.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 Packet Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PacketClusterProviderSpec is the Schema for the packetclusterproviderspecs API +// +k8s:openapi-gen=true +type PacketClusterProviderSpec struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + ProjectID string `json:"projectID"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PacketClusterProviderSpecList contains a list of PacketClusterProviderSpec +type PacketClusterProviderSpecList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PacketClusterProviderSpec `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PacketClusterProviderSpec{}, &PacketClusterProviderSpecList{}) +} diff --git a/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetclusterproviderstatus_types.go b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetclusterproviderstatus_types.go new file mode 100644 index 00000000000..7f789940ff9 --- /dev/null +++ b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetclusterproviderstatus_types.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 Packet Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// PacketClusterProviderStatusSpec defines the desired state of PacketClusterProviderStatus +type PacketClusterProviderStatusSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// PacketClusterProviderStatusStatus defines the observed state of PacketClusterProviderStatus +type PacketClusterProviderStatusStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PacketClusterProviderStatus is the Schema for the packetclusterproviderstatuses API +// +k8s:openapi-gen=true +type PacketClusterProviderStatus struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PacketClusterProviderStatusSpec `json:"spec,omitempty"` + Status PacketClusterProviderStatusStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PacketClusterProviderStatusList contains a list of PacketClusterProviderStatus +type PacketClusterProviderStatusList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PacketClusterProviderStatus `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PacketClusterProviderStatus{}, &PacketClusterProviderStatusList{}) +} diff --git a/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetmachineproviderspec_types.go b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetmachineproviderspec_types.go new file mode 100644 index 00000000000..ab4a4e39432 --- /dev/null +++ b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetmachineproviderspec_types.go @@ -0,0 +1,80 @@ +/* +Copyright 2019 Packet Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PacketMachineProviderSpec is the Schema for the packetmachineproviderspecs API +// +k8s:openapi-gen=true +type PacketMachineProviderSpec struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Roles []MachineRole `json:"roles,omitempty"` + Facility []string `json:"facility,omitempty"` + OS string `json:"OS"` + ProjectID string `json:"projectID"` + BillingCycle string `json:"billingCycle"` + MachineType string `json:"machineType"` + SshKeys []string `json:"sshKeys,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PacketMachineProviderSpecList contains a list of PacketMachineProviderSpec +type PacketMachineProviderSpecList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PacketMachineProviderSpec `json:"items"` +} + +// what software and configuration will be used when provisioning and managing +// the Machine. A single Machine may have more than one role, and the list and +// definitions of supported roles is expected to evolve over time. +// +// Currently, only two roles are supported: Master and Node. In the future, we +// expect user needs to drive the evolution and granularity of these roles, +// with new additions accommodating common cluster patterns, like dedicated +// etcd Machines. +// +// +-----------------------+------------------------+ +// | Master present | Master absent | +// +---------------+-----------------------+------------------------| +// | Node present: | Install control plane | Join the cluster as | +// | | and be schedulable | just a node | +// |---------------+-----------------------+------------------------| +// | Node absent: | Install control plane | Invalid configuration | +// | | and be unschedulable | | +// +---------------+-----------------------+------------------------+ +type MachineRole string + +const ( + MasterRole MachineRole = "Master" + NodeRole MachineRole = "Node" +) + +func init() { + SchemeBuilder.Register(&PacketMachineProviderSpec{}, &PacketMachineProviderSpecList{}) +} diff --git a/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetmachineproviderstatus_types.go b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetmachineproviderstatus_types.go new file mode 100644 index 00000000000..7fdcc54f41d --- /dev/null +++ b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/packetmachineproviderstatus_types.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 Packet Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// PacketMachineProviderStatusSpec defines the desired state of PacketMachineProviderStatus +type PacketMachineProviderStatusSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// PacketMachineProviderStatusStatus defines the observed state of PacketMachineProviderStatus +type PacketMachineProviderStatusStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PacketMachineProviderStatus is the Schema for the packetmachineproviderstatuses API +// +k8s:openapi-gen=true +type PacketMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PacketMachineProviderStatusSpec `json:"spec,omitempty"` + Status PacketMachineProviderStatusStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PacketMachineProviderStatusList contains a list of PacketMachineProviderStatus +type PacketMachineProviderStatusList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PacketMachineProviderStatus `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PacketMachineProviderStatus{}, &PacketMachineProviderStatusList{}) +} diff --git a/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/register.go b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/register.go new file mode 100644 index 00000000000..6b6a38bcf90 --- /dev/null +++ b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/register.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 Packet Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// NOTE: Boilerplate only. Ignore this file. + +// Package v1alpha1 contains API Schema definitions for the packet v1alpha1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider +// +k8s:defaulter-gen=TypeMeta +// +groupName=packetprovider.k8s.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "packetprovider.k8s.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme is required by pkg/client/... + AddToScheme = SchemeBuilder.AddToScheme +) + +// Resource is required by pkg/client/listers/... 
+func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/types.go b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/types.go new file mode 100644 index 00000000000..01166011551 --- /dev/null +++ b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/types.go @@ -0,0 +1,18 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PacketMachineProviderConfig contains Config for Packet machines. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PacketMachineProviderConfig struct { + metav1.TypeMeta `json:",inline"` + + ProjectID string `json:"projectID"` + Facilities []string `json:"facility"` + InstanceType string `json:"machineType"` + Tags []string `json:"tags,omitempty"` + OS string `json:"os,omitempty"` + BillingCycle string `json:"billingCycle,omitempty"` +} diff --git a/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..234adf44dfc --- /dev/null +++ b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,378 @@ +// +build !ignore_autogenerated + +/* +Copyright 2019 Packet Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by main. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketClusterProviderSpec) DeepCopyInto(out *PacketClusterProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketClusterProviderSpec. +func (in *PacketClusterProviderSpec) DeepCopy() *PacketClusterProviderSpec { + if in == nil { + return nil + } + out := new(PacketClusterProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacketClusterProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PacketClusterProviderSpecList) DeepCopyInto(out *PacketClusterProviderSpecList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PacketClusterProviderSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketClusterProviderSpecList. +func (in *PacketClusterProviderSpecList) DeepCopy() *PacketClusterProviderSpecList { + if in == nil { + return nil + } + out := new(PacketClusterProviderSpecList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacketClusterProviderSpecList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketClusterProviderStatus) DeepCopyInto(out *PacketClusterProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketClusterProviderStatus. +func (in *PacketClusterProviderStatus) DeepCopy() *PacketClusterProviderStatus { + if in == nil { + return nil + } + out := new(PacketClusterProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacketClusterProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketClusterProviderStatusList) DeepCopyInto(out *PacketClusterProviderStatusList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PacketClusterProviderStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketClusterProviderStatusList. +func (in *PacketClusterProviderStatusList) DeepCopy() *PacketClusterProviderStatusList { + if in == nil { + return nil + } + out := new(PacketClusterProviderStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacketClusterProviderStatusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketClusterProviderStatusSpec) DeepCopyInto(out *PacketClusterProviderStatusSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketClusterProviderStatusSpec. 
+func (in *PacketClusterProviderStatusSpec) DeepCopy() *PacketClusterProviderStatusSpec { + if in == nil { + return nil + } + out := new(PacketClusterProviderStatusSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketClusterProviderStatusStatus) DeepCopyInto(out *PacketClusterProviderStatusStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketClusterProviderStatusStatus. +func (in *PacketClusterProviderStatusStatus) DeepCopy() *PacketClusterProviderStatusStatus { + if in == nil { + return nil + } + out := new(PacketClusterProviderStatusStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketMachineProviderConfig) DeepCopyInto(out *PacketMachineProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Facilities != nil { + in, out := &in.Facilities, &out.Facilities + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketMachineProviderConfig. +func (in *PacketMachineProviderConfig) DeepCopy() *PacketMachineProviderConfig { + if in == nil { + return nil + } + out := new(PacketMachineProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacketMachineProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketMachineProviderSpec) DeepCopyInto(out *PacketMachineProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]MachineRole, len(*in)) + copy(*out, *in) + } + if in.Facility != nil { + in, out := &in.Facility, &out.Facility + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SshKeys != nil { + in, out := &in.SshKeys, &out.SshKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketMachineProviderSpec. +func (in *PacketMachineProviderSpec) DeepCopy() *PacketMachineProviderSpec { + if in == nil { + return nil + } + out := new(PacketMachineProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacketMachineProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PacketMachineProviderSpecList) DeepCopyInto(out *PacketMachineProviderSpecList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PacketMachineProviderSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketMachineProviderSpecList. +func (in *PacketMachineProviderSpecList) DeepCopy() *PacketMachineProviderSpecList { + if in == nil { + return nil + } + out := new(PacketMachineProviderSpecList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacketMachineProviderSpecList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketMachineProviderStatus) DeepCopyInto(out *PacketMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketMachineProviderStatus. +func (in *PacketMachineProviderStatus) DeepCopy() *PacketMachineProviderStatus { + if in == nil { + return nil + } + out := new(PacketMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacketMachineProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketMachineProviderStatusList) DeepCopyInto(out *PacketMachineProviderStatusList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PacketMachineProviderStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketMachineProviderStatusList. +func (in *PacketMachineProviderStatusList) DeepCopy() *PacketMachineProviderStatusList { + if in == nil { + return nil + } + out := new(PacketMachineProviderStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacketMachineProviderStatusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketMachineProviderStatusSpec) DeepCopyInto(out *PacketMachineProviderStatusSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketMachineProviderStatusSpec. 
+func (in *PacketMachineProviderStatusSpec) DeepCopy() *PacketMachineProviderStatusSpec { + if in == nil { + return nil + } + out := new(PacketMachineProviderStatusSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketMachineProviderStatusStatus) DeepCopyInto(out *PacketMachineProviderStatusStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketMachineProviderStatusStatus. +func (in *PacketMachineProviderStatusStatus) DeepCopy() *PacketMachineProviderStatusStatus { + if in == nil { + return nil + } + out := new(PacketMachineProviderStatusStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 6057ace67a6..3c1cf8994b1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1066,6 +1066,9 @@ github.com/ovirt/terraform-provider-ovirt/ovirt # github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db ## explicit github.com/packer-community/winrmcp/winrmcp +# github.com/packethost/cluster-api-provider-packet v0.1.0 +## explicit +github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1 # github.com/packethost/packngo v0.2.1-0.20200424110205-36917dbc292f ## explicit github.com/packethost/packngo @@ -2039,6 +2042,7 @@ sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1 sigs.k8s.io/cluster-api-provider-openstack/pkg/apis sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1 # sigs.k8s.io/controller-runtime v0.5.2 +sigs.k8s.io/controller-runtime/pkg/runtime/scheme sigs.k8s.io/controller-runtime/pkg/scheme # sigs.k8s.io/controller-tools v0.3.1-0.20200617211605-651903477185 ## explicit diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/scheme/scheme.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/scheme/scheme.go new file mode 100644 index 00000000000..5b6c9465bfa --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/scheme/scheme.go @@ -0,0 +1,29 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package scheme contains utilities for gradually building Schemes, +// which contain information associating Go types with Kubernetes +// groups, versions, and kinds. +// +// Deprecated: use pkg/scheme instead. +package scheme + +import ( + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Builder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds. +type Builder = scheme.Builder From cbf9b084ceaf712e0325d16c4f35e70ea9494af9 Mon Sep 17 00:00:00 2001 From: Marques Johansson Date: Wed, 2 Sep 2020 14:00:38 -0400 Subject: [PATCH 08/11] fixup! fixup! 
fill in packet Platform and Metadata types --- .../install.openshift.io_installconfigs.yaml | 58 ++++++++++--- docs/dev/adding-new-platform.md | 2 + go.mod | 2 +- pkg/asset/ignition/bootstrap/bootstrap.go | 1 + pkg/asset/ignition/machine/node.go | 1 + pkg/asset/installconfig/packet/client.go | 2 +- .../packet/mock/packetclient_generated.go | 80 ++++++++++++++++++ pkg/asset/machines/packet/OWNERS | 7 ++ pkg/asset/machines/packet/machines.go | 81 +++++++++++++++++++ pkg/asset/machines/packet/machinesets.go | 77 ++++++++++++++++++ pkg/asset/rhcos/bootstrap_image.go | 1 + pkg/types/packet/defaults/platform.go | 12 ++- pkg/types/packet/machinepool.go | 22 ++--- pkg/types/packet/platform.go | 2 +- 14 files changed, 316 insertions(+), 32 deletions(-) create mode 100644 pkg/asset/machines/packet/OWNERS create mode 100644 pkg/asset/machines/packet/machines.go create mode 100644 pkg/asset/machines/packet/machinesets.go diff --git a/data/data/install.openshift.io_installconfigs.yaml b/data/data/install.openshift.io_installconfigs.yaml index 0ca80a02a23..c58e01611b3 100644 --- a/data/data/install.openshift.io_installconfigs.yaml +++ b/data/data/install.openshift.io_installconfigs.yaml @@ -289,8 +289,6 @@ spec: packet: description: Packet is the configuration used when installing on Packet. - properties: {} - # TODO(displague) [properties] type: object vsphere: description: VSphere is the configuration used when installing @@ -579,11 +577,9 @@ spec: - high_performance type: string type: object - packet: - description: Packet is the configuration used when installing on - Packet. - properties: {} - # TODO(displague) properties + packet: + description: Packet is the configuration used when installing + on Packet. type: object vsphere: description: VSphere is the configuration used when installing @@ -1446,9 +1442,51 @@ spec: - ovirt_storage_domain_id type: object packet: - description: Packet is the configuration used when installing on Packet - properties: {} - # TODO(displague) properties + description: Packet is the configuration used when installing on Packet. + properties: + apivip: + description: 'APIVIP is the static IP on the nodes subnet that + the api port for openshift will be assigned Default: will be + set to the 5 on the first entry in the machineNetwork CIDR' + format: ip + type: string + bootstrapOSImage: + description: BootstrapOSImage is a URL to override the default + OS image for the bootstrap node. The URL must contain a sha256 + hash of the image e.g https://mirror.example.com/images/qemu.qcow2.gz?sha256=a07bd... + type: string + clusterOSImage: + description: ClusterOSImage is a URL to override the default OS + image for cluster nodes. The URL must contain a sha256 hash + of the image e.g https://mirror.example.com/images/metal.qcow2.gz?sha256=3b5a8... + type: string + computeSubnet: + description: ComputeSubnet is an existing subnet where the compute + nodes will be deployed. The value should be the name of the + subnet. + type: string + controlPlaneSubnet: + description: ControlPlaneSubnet is an existing subnet where the + control plane will be deployed. The value should be the name + of the subnet. + type: string + defaultMachinePlatform: + description: DefaultMachinePlatform is the default configuration + used when installing on Packet for machine pools which do + not define their own platform configuration.
+ type: object + facility_code: + description: FacilityCode represents the Packet region and datacenter + where your devices will be provisioned (https://www.packet.com/developers/docs/getting-started/facilities/) + type: string + network: + description: Network specifies an existing VPC where the cluster + should be created rather than provisioning a new one. + type: string + project_id: + description: ProjectID represents the Packet project used for + logical grouping and invoicing (https://www.packet.com/developers/docs/API/getting-started/) + type: string type: object vsphere: description: VSphere is the configuration used when installing on diff --git a/docs/dev/adding-new-platform.md b/docs/dev/adding-new-platform.md index 6f433d39d1a..5d0f2baca9b 100644 --- a/docs/dev/adding-new-platform.md +++ b/docs/dev/adding-new-platform.md @@ -42,6 +42,8 @@ This document describes the process for adding a new platform to OpenShift in st Expected PRs: +* : Before starting any work, create an enhancement request. The OpenShift team will surface reference architectures and dependencies, viable paths, best practices, and expected challenges such as potential blockers and open issues. Follow the enhancement template at . + * : adding your Platform TODO(displague) expected limitations diff --git a/go.mod b/go.mod index c590748e519..fd1b22692a3 100644 --- a/go.mod +++ b/go.mod @@ -56,13 +56,13 @@ require ( github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200120152131-1b09fd9e7156 github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603 github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20200504092944-27473ea1ae43 - github.com/packethost/cluster-api-provider-packet v0.1.0 github.com/openshift/library-go v0.0.0-20200324092245-db2a8546af81 github.com/openshift/machine-api-operator v0.2.1-0.20200429102619-d36974451290 github.com/openshift/machine-config-operator v4.2.0-alpha.0.0.20190917115525-033375cbe820+incompatible github.com/ovirt/go-ovirt v0.0.0-20200613023950-320a86f9df27 github.com/ovirt/terraform-provider-ovirt v0.4.3-0.20200406133650-74a154c1d861 github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db // indirect + github.com/packethost/cluster-api-provider-packet v0.1.0 github.com/packethost/packngo v0.2.1-0.20200424110205-36917dbc292f github.com/pborman/uuid v1.2.0 github.com/pierrec/lz4 v2.3.0+incompatible // indirect diff --git a/pkg/asset/ignition/bootstrap/bootstrap.go b/pkg/asset/ignition/bootstrap/bootstrap.go index ffc95bb0f49..4f462e0fbd3 100644 --- a/pkg/asset/ignition/bootstrap/bootstrap.go +++ b/pkg/asset/ignition/bootstrap/bootstrap.go @@ -331,6 +331,7 @@ func (a *Bootstrap) addSystemdUnits(uri string, templateData *bootstrapTemplateD "systemd-journal-gatewayd.socket": {}, "approve-csr.service": {}, // baremetal & openstack platform services + // TODO(displague) packet? "keepalived.service": {}, "coredns.service": {}, "ironic.service": {}, diff --git a/pkg/asset/ignition/machine/node.go b/pkg/asset/ignition/machine/node.go index 768d2ec9b83..12a8fc7ca71 100644 --- a/pkg/asset/ignition/machine/node.go +++ b/pkg/asset/ignition/machine/node.go @@ -37,6 +37,7 @@ func pointerIgnitionConfig(installConfig *types.InstallConfig, rootCA []byte, ro ignitionHost = net.JoinHostPort(installConfig.VSphere.APIVIP, "22623") } case packettypes.Name: + // TODO(displague) Do I have to set platform = "packet" around here? 
if installConfig.Packet.APIVIP != "" { ignitionHost = net.JoinHostPort(installConfig.Packet.APIVIP, "22623") diff --git a/pkg/asset/installconfig/packet/client.go b/pkg/asset/installconfig/packet/client.go index 5fd3d498d61..9bc8f21196a 100644 --- a/pkg/asset/installconfig/packet/client.go +++ b/pkg/asset/installconfig/packet/client.go @@ -11,7 +11,7 @@ const ( PACKET_CONSUMER_TOKEN = "redhat openshift ipi" ) -//go:generate mockgen -source=./client.go -destination=mock/packet_generated.go -package=mock +//go:generate mockgen -source=./client.go -destination=mock/packetclient_generated.go -package=mock // API represents the calls made to the API. type API interface { diff --git a/pkg/asset/installconfig/packet/mock/packetclient_generated.go b/pkg/asset/installconfig/packet/mock/packetclient_generated.go index e69de29bb2d..bf0a21d610b 100644 --- a/pkg/asset/installconfig/packet/mock/packetclient_generated.go +++ b/pkg/asset/installconfig/packet/mock/packetclient_generated.go @@ -0,0 +1,80 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./client.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + gomock "github.com/golang/mock/gomock" + packngo "github.com/packethost/packngo" + reflect "reflect" +) + +// MockAPI is a mock of API interface +type MockAPI struct { + ctrl *gomock.Controller + recorder *MockAPIMockRecorder +} + +// MockAPIMockRecorder is the mock recorder for MockAPI +type MockAPIMockRecorder struct { + mock *MockAPI +} + +// NewMockAPI creates a new mock instance +func NewMockAPI(ctrl *gomock.Controller) *MockAPI { + mock := &MockAPI{ctrl: ctrl} + mock.recorder = &MockAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockAPI) EXPECT() *MockAPIMockRecorder { + return m.recorder +} + +// ListProjects mocks base method +func (m *MockAPI) ListProjects(ctx context.Context) ([]packngo.Project, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListProjects", ctx) + ret0, _ := ret[0].([]packngo.Project) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListProjects indicates an expected call of ListProjects +func (mr *MockAPIMockRecorder) ListProjects(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProjects", reflect.TypeOf((*MockAPI)(nil).ListProjects), ctx) +} + +// ListFacilities mocks base method +func (m *MockAPI) ListFacilities(ctx context.Context) ([]packngo.Facility, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListFacilities", ctx) + ret0, _ := ret[0].([]packngo.Facility) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListFacilities indicates an expected call of ListFacilities +func (mr *MockAPIMockRecorder) ListFacilities(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFacilities", reflect.TypeOf((*MockAPI)(nil).ListFacilities), ctx) +} + +// ListPlans mocks base method +func (m *MockAPI) ListPlans(ctx context.Context) ([]packngo.Plan, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPlans", ctx) + ret0, _ := ret[0].([]packngo.Plan) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListPlans indicates an expected call of ListPlans +func (mr *MockAPIMockRecorder) ListPlans(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPlans", reflect.TypeOf((*MockAPI)(nil).ListPlans), ctx) +} diff --git 
a/pkg/asset/machines/packet/OWNERS b/pkg/asset/machines/packet/OWNERS new file mode 100644 index 00000000000..8877fbf8d6d --- /dev/null +++ b/pkg/asset/machines/packet/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +approvers: + - packet-approvers +reviewers: + - packet-reviewers diff --git a/pkg/asset/machines/packet/machines.go b/pkg/asset/machines/packet/machines.go new file mode 100644 index 00000000000..2edd63d4f41 --- /dev/null +++ b/pkg/asset/machines/packet/machines.go @@ -0,0 +1,81 @@ +// Package packet generates Machine objects for packet. +package packet + +import ( + "fmt" + + machineapi "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/packet" + + packetprovider "github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1" +) + +// Machines returns a list of machines for a machinepool. +func Machines(clusterID string, config *types.InstallConfig, pool *types.MachinePool, osImage, role, userDataSecret string) ([]machineapi.Machine, error) { + if configPlatform := config.Platform.Name(); configPlatform != packet.Name { + return nil, fmt.Errorf("non-packet configuration: %q", configPlatform) + } + if poolPlatform := pool.Platform.Name(); poolPlatform != packet.Name { + return nil, fmt.Errorf("non-packet machine-pool: %q", poolPlatform) + } + platform := config.Platform.Packet + + total := int64(1) + if pool.Replicas != nil { + total = *pool.Replicas + } + provider := provider(platform, pool, userDataSecret, osImage) + var machines []machineapi.Machine + for idx := int64(0); idx < total; idx++ { + machine := machineapi.Machine{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "machine.openshift.io/v1beta1", + Kind: "Machine", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "openshift-machine-api", + Name: fmt.Sprintf("%s-%s-%d", clusterID, pool.Name, idx), + Labels: map[string]string{ + "machine.openshift.io/cluster-api-cluster": clusterID, + "machine.openshift.io/cluster-api-machine-role": role, + "machine.openshift.io/cluster-api-machine-type": role, + }, + }, + Spec: machineapi.MachineSpec{ + ProviderSpec: machineapi.ProviderSpec{ + Value: &runtime.RawExtension{Object: provider}, + }, + // we don't need to set Versions, because we control those via cluster operators. + }, + } + machines = append(machines, machine) + } + + return machines, nil +} + +func provider(platform *packet.Platform, pool *types.MachinePool, userDataSecret string, osImage string) *packetprovider.PacketMachineProviderSpec { + spec := packetprovider.PacketMachineProviderSpec{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "packetprovider.k8s.io/v1alpha1", + Kind: "PacketMachineProviderSpec", + }, + // TODO(displague) which role? + Roles: []packetprovider.MachineRole{packetprovider.MasterRole, packetprovider.NodeRole}, + Facility: []string{platform.FacilityCode}, + OS: "custom_ipxe", + ProjectID: platform.ProjectID, + // TODO(displague) IPXE + // IPXEScriptURL: osImage / platform.BootstrapOSImage / platform.ClusterOSImage? 
+ BillingCycle: "hourly", + MachineType: "", // TODO(displague) must provide a type + SshKeys: []string{}, + // UserDataSecret: &corev1.LocalObjectReference{Name: userDataSecret}, + // CredentialsSecret: &corev1.LocalObjectReference{Name: "packet-credentials"}, + } + return &spec +} diff --git a/pkg/asset/machines/packet/machinesets.go b/pkg/asset/machines/packet/machinesets.go new file mode 100644 index 00000000000..582ea9887f4 --- /dev/null +++ b/pkg/asset/machines/packet/machinesets.go @@ -0,0 +1,77 @@ +// Package packet generates Machine objects for packet. +package packet + +import ( + "fmt" + + machineapi "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/pointer" + + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/packet" +) + +// MachineSets returns a list of machinesets for a machinepool. +func MachineSets(clusterID string, config *types.InstallConfig, pool *types.MachinePool, osImage, role, + userDataSecret string) ([]*machineapi.MachineSet, error) { + + if configPlatform := config.Platform.Name(); configPlatform != packet.Name { + return nil, fmt.Errorf("non-packet configuration: %q", configPlatform) + } + if poolPlatform := pool.Platform.Name(); poolPlatform != "" && poolPlatform != packet.Name { + return nil, fmt.Errorf("non-packet machine-pool: %q", poolPlatform) + } + platform := config.Platform.Packet + + total := int64(0) + if pool.Replicas != nil { + total = *pool.Replicas + } + + provider := provider(platform, pool, userDataSecret, osImage) + name := fmt.Sprintf("%s-%s-%d", clusterID, pool.Name, 0) + mset := &machineapi.MachineSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "machine.openshift.io/v1beta1", + Kind: "MachineSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "openshift-machine-api", + Name: name, + Labels: map[string]string{ + "machine.openshift.io/cluster-api-cluster": clusterID, + "machine.openshift.io/cluster-api-machine-role": role, + "machine.openshift.io/cluster-api-machine-type": role, + }, + }, + Spec: machineapi.MachineSetSpec{ + Replicas: pointer.Int32Ptr(int32(total)), + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "machine.openshift.io/cluster-api-machineset": name, + "machine.openshift.io/cluster-api-cluster": clusterID, + }, + }, + Template: machineapi.MachineTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machineset": name, + "machine.openshift.io/cluster-api-cluster": clusterID, + "machine.openshift.io/cluster-api-machine-role": role, + "machine.openshift.io/cluster-api-machine-type": role, + }, + }, + Spec: machineapi.MachineSpec{ + ProviderSpec: machineapi.ProviderSpec{ + Value: &runtime.RawExtension{Object: provider}, + }, + // we don't need to set Versions, because we control those via cluster operators. + }, + }, + }, + } + + return []*machineapi.MachineSet{mset}, nil +} diff --git a/pkg/asset/rhcos/bootstrap_image.go b/pkg/asset/rhcos/bootstrap_image.go index 6fde584b5ba..8869a607775 100644 --- a/pkg/asset/rhcos/bootstrap_image.go +++ b/pkg/asset/rhcos/bootstrap_image.go @@ -42,6 +42,7 @@ func (i *BootstrapImage) Generate(p asset.Parents) error { ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) defer cancel() switch config.Platform.Name() { + // TODO(displague) Packet? 
case baremetal.Name: // Check for RHCOS image URL override if boi := config.Platform.BareMetal.BootstrapOSImage; boi != "" { diff --git a/pkg/types/packet/defaults/platform.go b/pkg/types/packet/defaults/platform.go index d3fca4c1478..148cd52e705 100644 --- a/pkg/types/packet/defaults/platform.go +++ b/pkg/types/packet/defaults/platform.go @@ -1,7 +1,13 @@ package defaults -// Defaults for the baremetal platform. +import "github.com/openshift/installer/pkg/types/packet" + +// Defaults for the packet platform. const ( - // TODO(displague) what API? + // TODO(displague) what API? metadata? DefaultURI = "https://api.packet.com" -) \ No newline at end of file +) + +// SetPlatformDefaults sets the defaults for the platform. +func SetPlatformDefaults(p *packet.Platform) { +} diff --git a/pkg/types/packet/machinepool.go b/pkg/types/packet/machinepool.go index f518d6110d8..265cb8d8a6e 100644 --- a/pkg/types/packet/machinepool.go +++ b/pkg/types/packet/machinepool.go @@ -3,23 +3,13 @@ package packet // MachinePool stores the configuration for a machine pool installed // on packet. type MachinePool struct { -} - -// Disk defines a BM disk -type Disk struct { - // SizeGB size of the bootable disk in GiB. - SizeGB int64 `json:"sizeGB"` -} - -// CPU defines the BM cpu, made of (Sockets * Cores). -type CPU struct { - // Sockets is the number of sockets for a BM. - // Total CPUs is (Sockets * Cores) - Sockets int32 `json:"sockets"` + // The Packet Plan defines the CPU, memory, and networking specs of the + // provisioned node + Plan string - // Cores is the number of cores per socket. - // Total CPUs is (Sockets * Cores) - Cores int32 `json:"cores"` + // TODO(displague) Hardware reservation id? + // TODO(displague) virtual network? + // TODO(displague) is userdata needed at this level? } // Set sets the values from `required` to `p`. diff --git a/pkg/types/packet/platform.go b/pkg/types/packet/platform.go index 954de21a281..7d792017a23 100644 --- a/pkg/types/packet/platform.go +++ b/pkg/types/packet/platform.go @@ -16,7 +16,7 @@ type Platform struct { APIVIP string `json:"apivip,omitempty"` // DefaultMachinePlatform is the default configuration used when - // installing on bare metal for machine pools which do not define their own + // installing on Packet for machine pools which do not define their own // platform configuration. // +optional DefaultMachinePlatform *MachinePool `json:"defaultMachinePlatform,omitempty"` From e0e05892e2d362fe9e84a2ce79ea5bd66c7269b8 Mon Sep 17 00:00:00 2001 From: Marques Johansson Date: Tue, 8 Sep 2020 14:32:34 -0400 Subject: [PATCH 09/11] fixup! fixup! fixup! 
fill in packet Platform and Metadata types --- pkg/asset/machines/master.go | 13 ++++++++ pkg/asset/machines/worker.go | 8 +++++ .../pkg/apis/addtoscheme_packet_v1alpha1.go | 26 +++++++++++++++ .../pkg/apis/apis.go | 33 +++++++++++++++++++ vendor/modules.txt | 1 + 5 files changed, 81 insertions(+) create mode 100644 vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/addtoscheme_packet_v1alpha1.go create mode 100644 vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/apis.go diff --git a/pkg/asset/machines/master.go b/pkg/asset/machines/master.go index 8bca6faac86..4d8f2148a40 100644 --- a/pkg/asset/machines/master.go +++ b/pkg/asset/machines/master.go @@ -20,6 +20,7 @@ import ( vsphereapi "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider" vsphereprovider "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1" mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + packetapi "github.com/packethost/cluster-api-provider-packet/pkg/apis" "github.com/pkg/errors" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/runtime" @@ -42,6 +43,7 @@ import ( "github.com/openshift/installer/pkg/asset/machines/machineconfig" "github.com/openshift/installer/pkg/asset/machines/openstack" "github.com/openshift/installer/pkg/asset/machines/ovirt" + "github.com/openshift/installer/pkg/asset/machines/packet" "github.com/openshift/installer/pkg/asset/machines/vsphere" "github.com/openshift/installer/pkg/asset/rhcos" rhcosutils "github.com/openshift/installer/pkg/rhcos" @@ -56,6 +58,7 @@ import ( nonetypes "github.com/openshift/installer/pkg/types/none" openstacktypes "github.com/openshift/installer/pkg/types/openstack" ovirttypes "github.com/openshift/installer/pkg/types/ovirt" + packettypes "github.com/openshift/installer/pkg/types/packet" vspheretypes "github.com/openshift/installer/pkg/types/vsphere" ) @@ -349,6 +352,15 @@ func (m *Master) Generate(dependencies asset.Parents) error { return errors.Wrap(err, "failed to create master machine objects") } vsphere.ConfigMasters(machines, clusterID.InfraID) + case packettypes.Name: + mpool := defaultPacketMachinePoolPlatform() + mpool.Set(ic.Platform.Packet.DefaultMachinePlatform) + mpool.Set(pool.Platform.Packet) + pool.Platform.Packet = &mpool + machines, err = packet.Machines(clusterID.InfraID, ic, pool, string(*rhcosImage), "master", "master-user-data") + if err != nil { + return errors.Wrap(err, "failed to create master machine objects") + } case nonetypes.Name: default: return fmt.Errorf("invalid Platform") @@ -464,6 +476,7 @@ func (m *Master) Machines() ([]machineapi.Machine, error) { libvirtapi.AddToScheme(scheme) openstackapi.AddToScheme(scheme) ovirtproviderapi.AddToScheme(scheme) + packetapi.AddToScheme(scheme) vsphereapi.AddToScheme(scheme) decoder := serializer.NewCodecFactory(scheme).UniversalDecoder( awsprovider.SchemeGroupVersion, diff --git a/pkg/asset/machines/worker.go b/pkg/asset/machines/worker.go index 4c863fad3e1..5ddac3626a3 100644 --- a/pkg/asset/machines/worker.go +++ b/pkg/asset/machines/worker.go @@ -56,6 +56,7 @@ import ( nonetypes "github.com/openshift/installer/pkg/types/none" openstacktypes "github.com/openshift/installer/pkg/types/openstack" ovirttypes "github.com/openshift/installer/pkg/types/ovirt" + packettypes "github.com/openshift/installer/pkg/types/packet" vspheretypes "github.com/openshift/installer/pkg/types/vsphere" ) @@ -129,6 +130,13 @@ func defaultOvirtMachinePoolPlatform() ovirttypes.MachinePool { } } 
+func defaultPacketMachinePoolPlatform() packettypes.MachinePool { + return packettypes.MachinePool{ + // TODO(displague) what defaults should we supply? + Plan: "c3.medium.x86", + } +} + func defaultVSphereMachinePoolPlatform() vspheretypes.MachinePool { return vspheretypes.MachinePool{ NumCPUs: 2, diff --git a/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/addtoscheme_packet_v1alpha1.go b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/addtoscheme_packet_v1alpha1.go new file mode 100644 index 00000000000..df2466da5cb --- /dev/null +++ b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/addtoscheme_packet_v1alpha1.go @@ -0,0 +1,26 @@ +/* +Copyright 2019 Packet Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apis + +import ( + "github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme) +} diff --git a/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/apis.go b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/apis.go new file mode 100644 index 00000000000..d4c01b13a61 --- /dev/null +++ b/vendor/github.com/packethost/cluster-api-provider-packet/pkg/apis/apis.go @@ -0,0 +1,33 @@ +/* +Copyright 2019 Packet Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Generate deepcopy for apis +//go:generate go run ../../vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go -O zz_generated.deepcopy -i ./... -h ../../hack/boilerplate.go.txt + +// Package apis contains Kubernetes API groups.
+package apis + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// AddToSchemes may be used to add all resources defined in the project to a Scheme +var AddToSchemes runtime.SchemeBuilder + +// AddToScheme adds all Resources to the Scheme +func AddToScheme(s *runtime.Scheme) error { + return AddToSchemes.AddToScheme(s) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 3c1cf8994b1..b80109c1d39 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1068,6 +1068,7 @@ github.com/ovirt/terraform-provider-ovirt/ovirt github.com/packer-community/winrmcp/winrmcp # github.com/packethost/cluster-api-provider-packet v0.1.0 ## explicit +github.com/packethost/cluster-api-provider-packet/pkg/apis github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1 # github.com/packethost/packngo v0.2.1-0.20200424110205-36917dbc292f ## explicit From 1730013ba690f7a6c0c84519ee80be9b819506f4 Mon Sep 17 00:00:00 2001 From: Marques Johansson Date: Sat, 12 Sep 2020 23:31:11 -0400 Subject: [PATCH 10/11] fixup! fixup! fixup! fixup! fill in packet Platform and Metadata types --- .../files/usr/local/bin/bootkube.sh.template | 1 + .../cloud-creds-secret.yaml.template | 9 +++++++++ pkg/asset/machines/master.go | 2 ++ pkg/asset/machines/worker.go | 20 +++++++++++++++++++ pkg/asset/manifests/cloudproviderconfig.go | 4 +++- pkg/asset/manifests/dns.go | 4 +++- pkg/asset/manifests/openshift.go | 15 ++++++++++++++ pkg/asset/manifests/template.go | 11 ++++++++++ pkg/terraform/gather/packet/ip.go | 12 ++++++++++- 9 files changed, 75 insertions(+), 3 deletions(-) diff --git a/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template b/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template index 331872e1265..69977ee112a 100755 --- a/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template +++ b/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template @@ -322,6 +322,7 @@ then copy_static_resources_for baremetal copy_static_resources_for openstack copy_static_resources_for ovirt + copy_static_resources_for packet copy_static_resources_for vsphere cp mco-bootstrap/manifests/* manifests/ diff --git a/data/data/manifests/openshift/cloud-creds-secret.yaml.template b/data/data/manifests/openshift/cloud-creds-secret.yaml.template index bd5c13d68f0..26ad70b5f34 100644 --- a/data/data/manifests/openshift/cloud-creds-secret.yaml.template +++ b/data/data/manifests/openshift/cloud-creds-secret.yaml.template @@ -14,6 +14,8 @@ metadata: name: vsphere-creds {{- else if .CloudCreds.Ovirt}} name: ovirt-credentials +{{- else if .CloudCreds.Packet}} + name: packet-credentials {{- end}} data: {{- if .CloudCreds.AWS}} @@ -42,4 +44,11 @@ data: ovirt_cafile: {{.CloudCreds.Ovirt.Base64encodeCAFile}} ovirt_insecure: {{.CloudCreds.Ovirt.Base64encodeInsecure}} ovirt_ca_bundle: {{.CloudCreds.Ovirt.Base64encodeCABundle}} +{{- else if .CloudCreds.Packet}} + packet_url: {{.CloudCreds.Packet.Base64encodeURL}} + packet_username: {{.CloudCreds.Packet.Base64encodeUsername}} + packet_password: {{.CloudCreds.Packet.Base64encodePassword}} + packet_cafile: {{.CloudCreds.Packet.Base64encodeCAFile}} + packet_insecure: {{.CloudCreds.Packet.Base64encodeInsecure}} + packet_ca_bundle: {{.CloudCreds.Packet.Base64encodeCABundle}} {{- end}} diff --git a/pkg/asset/machines/master.go b/pkg/asset/machines/master.go index 4d8f2148a40..8ddf0278906 100644 --- a/pkg/asset/machines/master.go +++ b/pkg/asset/machines/master.go @@ -21,6 +21,7 @@ import ( vsphereprovider 
"github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1" mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" packetapi "github.com/packethost/cluster-api-provider-packet/pkg/apis" + packetprovider "github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1" "github.com/pkg/errors" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/runtime" @@ -487,6 +488,7 @@ func (m *Master) Machines() ([]machineapi.Machine, error) { openstackprovider.SchemeGroupVersion, vsphereprovider.SchemeGroupVersion, ovirtprovider.SchemeGroupVersion, + packetprovider.SchemeGroupVersion, ) machines := []machineapi.Machine{} diff --git a/pkg/asset/machines/worker.go b/pkg/asset/machines/worker.go index 5ddac3626a3..69b999983ce 100644 --- a/pkg/asset/machines/worker.go +++ b/pkg/asset/machines/worker.go @@ -20,6 +20,8 @@ import ( vsphereproviderapi "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider" vsphereprovider "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1" mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + packetapi "github.com/packethost/cluster-api-provider-packet/pkg/apis" + packetprovider "github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1" "github.com/pkg/errors" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/runtime" @@ -42,6 +44,7 @@ import ( "github.com/openshift/installer/pkg/asset/machines/machineconfig" "github.com/openshift/installer/pkg/asset/machines/openstack" "github.com/openshift/installer/pkg/asset/machines/ovirt" + "github.com/openshift/installer/pkg/asset/machines/packet" "github.com/openshift/installer/pkg/asset/machines/vsphere" "github.com/openshift/installer/pkg/asset/rhcos" rhcosutils "github.com/openshift/installer/pkg/rhcos" @@ -383,6 +386,21 @@ func (w *Worker) Generate(dependencies asset.Parents) error { for _, set := range sets { machineSets = append(machineSets, set) } + case packettypes.Name: + mpool := defaultPacketMachinePoolPlatform() + mpool.Set(ic.Platform.Packet.DefaultMachinePlatform) + mpool.Set(pool.Platform.Packet) + pool.Platform.Packet = &mpool + + imageName, _ := rhcosutils.GenerateOpenStackImageName(string(*rhcosImage), clusterID.InfraID) + + sets, err := packet.MachineSets(clusterID.InfraID, ic, &pool, imageName, "worker", "worker-user-data") + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects for ovirt provider") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } case nonetypes.Name: default: return fmt.Errorf("invalid Platform") @@ -466,6 +484,7 @@ func (w *Worker) MachineSets() ([]machineapi.MachineSet, error) { libvirtapi.AddToScheme(scheme) openstackapi.AddToScheme(scheme) ovirtproviderapi.AddToScheme(scheme) + packetapi.AddToScheme(scheme) vsphereproviderapi.AddToScheme(scheme) decoder := serializer.NewCodecFactory(scheme).UniversalDecoder( awsprovider.SchemeGroupVersion, @@ -475,6 +494,7 @@ func (w *Worker) MachineSets() ([]machineapi.MachineSet, error) { libvirtprovider.SchemeGroupVersion, openstackprovider.SchemeGroupVersion, ovirtprovider.SchemeGroupVersion, + packetprovider.SchemeGroupVersion, vsphereprovider.SchemeGroupVersion, ) diff --git a/pkg/asset/manifests/cloudproviderconfig.go b/pkg/asset/manifests/cloudproviderconfig.go index 626ad47f856..74a329396cc 100644 --- a/pkg/asset/manifests/cloudproviderconfig.go +++ b/pkg/asset/manifests/cloudproviderconfig.go @@ 
-26,6 +26,7 @@ import ( nonetypes "github.com/openshift/installer/pkg/types/none" openstacktypes "github.com/openshift/installer/pkg/types/openstack" ovirttypes "github.com/openshift/installer/pkg/types/ovirt" + packettypes "github.com/openshift/installer/pkg/types/packet" vspheretypes "github.com/openshift/installer/pkg/types/vsphere" ) @@ -83,7 +84,8 @@ func (cpc *CloudProviderConfig) Generate(dependencies asset.Parents) error { } switch installConfig.Config.Platform.Name() { - case awstypes.Name, libvirttypes.Name, nonetypes.Name, baremetaltypes.Name, ovirttypes.Name: + case awstypes.Name, libvirttypes.Name, nonetypes.Name, baremetaltypes.Name, ovirttypes.Name, packettypes.Name: + // TODO(displague) What should Packet do? return nil case openstacktypes.Name: cloud, err := icopenstack.GetSession(installConfig.Config.Platform.OpenStack.Cloud) diff --git a/pkg/asset/manifests/dns.go b/pkg/asset/manifests/dns.go index ce851dde1ab..b245a7a1bd5 100644 --- a/pkg/asset/manifests/dns.go +++ b/pkg/asset/manifests/dns.go @@ -25,6 +25,7 @@ import ( nonetypes "github.com/openshift/installer/pkg/types/none" openstacktypes "github.com/openshift/installer/pkg/types/openstack" ovirttypes "github.com/openshift/installer/pkg/types/ovirt" + packettypes "github.com/openshift/installer/pkg/types/packet" vspheretypes "github.com/openshift/installer/pkg/types/vsphere" ) @@ -118,7 +119,8 @@ func (d *DNS) Generate(dependencies asset.Parents) error { config.Spec.PublicZone = &configv1.DNSZone{ID: zone.Name} } config.Spec.PrivateZone = &configv1.DNSZone{ID: fmt.Sprintf("%s-private-zone", clusterID.InfraID)} - case libvirttypes.Name, openstacktypes.Name, baremetaltypes.Name, nonetypes.Name, vspheretypes.Name, ovirttypes.Name: + case libvirttypes.Name, openstacktypes.Name, baremetaltypes.Name, nonetypes.Name, vspheretypes.Name, ovirttypes.Name, packettypes.Name: + // TODO(displague) What should Packet do? 
default: return errors.New("invalid Platform") } diff --git a/pkg/asset/manifests/openshift.go b/pkg/asset/manifests/openshift.go index 06705016d87..e6bd122a491 100644 --- a/pkg/asset/manifests/openshift.go +++ b/pkg/asset/manifests/openshift.go @@ -15,6 +15,7 @@ import ( "github.com/openshift/installer/pkg/asset/installconfig" "github.com/openshift/installer/pkg/asset/installconfig/gcp" "github.com/openshift/installer/pkg/asset/installconfig/ovirt" + "github.com/openshift/installer/pkg/asset/installconfig/packet" "github.com/openshift/installer/pkg/asset/machines" openstackmanifests "github.com/openshift/installer/pkg/asset/manifests/openstack" "github.com/openshift/installer/pkg/asset/openshiftinstall" @@ -30,6 +31,7 @@ import ( gcptypes "github.com/openshift/installer/pkg/types/gcp" openstacktypes "github.com/openshift/installer/pkg/types/openstack" ovirttypes "github.com/openshift/installer/pkg/types/ovirt" + packettypes "github.com/openshift/installer/pkg/types/packet" vspheretypes "github.com/openshift/installer/pkg/types/vsphere" ) @@ -177,6 +179,19 @@ func (o *Openshift) Generate(dependencies asset.Parents) error { Base64encodeCABundle: base64.StdEncoding.EncodeToString([]byte(conf.CABundle)), }, } + case packettypes.Name: + conf, err := packet.NewConfig() + if err != nil { + return err + } + + cloudCreds = cloudCredsSecretData{ + Packet: &PacketCredsSecretData{ + Base64encodeURL: base64.StdEncoding.EncodeToString([]byte(conf.APIURL)), + Base64encodeUsername: base64.StdEncoding.EncodeToString([]byte(conf.APIKey)), + }, + } + } templateData := &openshiftTemplateData{ diff --git a/pkg/asset/manifests/template.go b/pkg/asset/manifests/template.go index 01082d60388..85f3e46b447 100644 --- a/pkg/asset/manifests/template.go +++ b/pkg/asset/manifests/template.go @@ -47,6 +47,16 @@ type OvirtCredsSecretData struct { Base64encodeCABundle string } +// PacketCredsSecretData holds encoded credentials and is used to generated cloud-creds secret +type PacketCredsSecretData struct { + Base64encodeURL string + Base64encodeUsername string + Base64encodePassword string + Base64encodeCAFile string + Base64encodeInsecure string + Base64encodeCABundle string +} + type cloudCredsSecretData struct { AWS *AwsCredsSecretData Azure *AzureCredsSecretData @@ -54,6 +64,7 @@ type cloudCredsSecretData struct { OpenStack *OpenStackCredsSecretData VSphere *VSphereCredsSecretData Ovirt *OvirtCredsSecretData + Packet *PacketCredsSecretData } type bootkubeTemplateData struct { diff --git a/pkg/terraform/gather/packet/ip.go b/pkg/terraform/gather/packet/ip.go index 9cf29af89fe..a91ada6723f 100644 --- a/pkg/terraform/gather/packet/ip.go +++ b/pkg/terraform/gather/packet/ip.go @@ -3,12 +3,22 @@ package packet import ( "github.com/openshift/installer/pkg/terraform" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) // BootstrapIP returns the ip address for bootstrap host. // TODO(displague) implement func BootstrapIP(tfs *terraform.State) (string, error) { - return "", nil + br, err := terraform.LookupResource(tfs, "module.bootstrap", "packet_device", "lb") + if err != nil { + return "", errors.Wrap(err, "failed to lookup bootstrap") + } + if len(br.Instances) == 0 { + return "", errors.New("no bootstrap instance found") + } + bootstrap, _, err := unstructured.NestedString(br.Instances[0].Attributes, "access_public_ipv4") + return bootstrap, err } // ControlPlaneIPs returns the ip addresses for control plane hosts. 
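Patch 10 implements BootstrapIP from the Terraform state, while the matching ControlPlaneIPs body is not shown in that hunk. A minimal sketch of how ControlPlaneIPs could mirror the same lookup follows; the "module.masters" address, the packet_device resource name "master", and the access_public_ipv4 attribute are assumptions that would have to match whatever data/data/packet/master/main.tf ends up creating, so treat this as illustrative rather than as part of the patch.

package packet

import (
	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/openshift/installer/pkg/terraform"
)

// ControlPlaneIPs returns the ip addresses for control plane hosts.
// Sketch only: assumes module.masters creates packet_device "master"
// instances that expose access_public_ipv4, mirroring the bootstrap lookup.
func ControlPlaneIPs(tfs *terraform.State) ([]string, error) {
	mrs, err := terraform.LookupResource(tfs, "module.masters", "packet_device", "master")
	if err != nil {
		return nil, errors.Wrap(err, "failed to lookup masters")
	}
	masters := []string{}
	for idx, inst := range mrs.Instances {
		master, _, err := unstructured.NestedString(inst.Attributes, "access_public_ipv4")
		if err != nil {
			return nil, errors.Wrapf(err, "could not read public IPv4 of master %d", idx)
		}
		masters = append(masters, master)
	}
	return masters, nil
}

Like the bootstrap lookup, this reads only the Terraform state file, so the gather command needs no extra Packet API calls.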
From 176a395a5ed67a2e8d92c1690b91dcc15a81167b Mon Sep 17 00:00:00 2001 From: Marques Johansson Date: Sat, 12 Sep 2020 23:33:47 -0400 Subject: [PATCH 11/11] fixup! fixup! fixup! fixup! fixup! fill in packet Platform and Metadata types --- pkg/asset/quota/quota.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/asset/quota/quota.go b/pkg/asset/quota/quota.go index 5cd9b0df942..5ed8d9f0692 100644 --- a/pkg/asset/quota/quota.go +++ b/pkg/asset/quota/quota.go @@ -25,6 +25,7 @@ import ( "github.com/openshift/installer/pkg/types/none" "github.com/openshift/installer/pkg/types/openstack" "github.com/openshift/installer/pkg/types/ovirt" + "github.com/openshift/installer/pkg/types/packet" "github.com/openshift/installer/pkg/types/vsphere" ) @@ -105,7 +106,8 @@ func (a *PlatformQuotaCheck) Generate(dependencies asset.Parents) error { return summarizeFailingReport(reports) } summarizeReport(reports) - case azure.Name, baremetal.Name, libvirt.Name, none.Name, openstack.Name, ovirt.Name, vsphere.Name: + case azure.Name, baremetal.Name, libvirt.Name, none.Name, openstack.Name, ovirt.Name, packet.Name, vsphere.Name: + // TODO(displague) Anything special for Packet? // no special provisioning requirements to check default: err = fmt.Errorf("unknown platform type %q", platform)