From 6c41a4d65abb65c1f11719159a8ef91e1a5991fc Mon Sep 17 00:00:00 2001
From: Sam Gartner
Date: Thu, 22 Jan 2026 16:16:09 -0600
Subject: [PATCH] deleting clusters updates

update configs

Update README.md

fix defaults

updates to workflows
---
 .../action.yaml | 28 +++-
 .../action.yaml | 28 +++-
 .../run-hostbusters-test-suites/action.yaml | 28 +++-
 actions/clusters/clusters.go | 38 ++++-
 validation/certificates/k3s/README.md | 2 +-
 .../k3s/dualstack/defaults/defaults.yaml | 3 +
 validation/certificates/rke2/README.md | 2 +-
 .../schemas/hostbusters_schemas.yaml | 86 ----------
 .../ipv6/schemas/hostbusters_schemas.yaml | 86 ----------
 validation/deleting/k3s/README.md | 148 ++++++++++++++++++
 .../deleting/{ => k3s}/defaults/defaults.yaml | 0
 .../{rke2k3s => k3s}/delete_cluster_test.go | 42 ++---
 .../delete_init_machine_test.go | 64 ++++----
 .../k3s/dualstack/defaults/defaults.yaml | 16 ++
 .../dualstack/delete_cluster_test.go | 40 ++---
 .../dualstack/delete_init_machine_test.go | 55 ++++---
 .../schemas/hostbusters_schemas.yaml | 43 +++++
 .../deleting/k3s/ipv6/defaults/defaults.yaml | 16 ++
 .../{ => k3s}/ipv6/delete_cluster_test.go | 52 +++---
 .../ipv6/delete_init_machine_test.go | 74 ++++-----
 .../k3s/ipv6/schemas/hostbusters_schemas.yaml | 43 +++++
 .../k3s/schemas/hostbusters_schemas.yaml | 43 +++++
 validation/deleting/rke2/README.md | 80 ++++++++++
 .../deleting/rke2/defaults/defaults.yaml | 49 ++++++
 .../deleting/rke2/delete_cluster_test.go | 110 +++++++++++++
 .../deleting/rke2/delete_init_machine_test.go | 129 +++++++++++++++
 .../dualstack/defaults/defaults.yaml | 0
 .../rke2/dualstack/delete_cluster_test.go | 110 +++++++++++++
 .../dualstack/delete_init_machine_test.go | 128 +++++++++++++++
 .../schemas/hostbusters_schemas.yaml | 43 +++++
 .../{ => rke2}/ipv6/defaults/defaults.yaml | 0
 .../deleting/rke2/ipv6/delete_cluster_test.go | 110 +++++++++++++
 .../rke2/ipv6/delete_init_machine_test.go | 129 +++++++++++++++
 .../ipv6/schemas/hostbusters_schemas.yaml | 43 +++++
 .../rke2/schemas/hostbusters_schemas.yaml | 43 +++++
 .../deleting/rke2k3s/delete_init_machine.go | 38 -----
 .../rke2k3s/schemas/hostbusters_schemas.yaml | 86 ----------
 37 files changed, 1542 insertions(+), 493 deletions(-)
 delete mode 100644 validation/deleting/dualstack/schemas/hostbusters_schemas.yaml
 delete mode 100644 validation/deleting/ipv6/schemas/hostbusters_schemas.yaml
 create mode 100644 validation/deleting/k3s/README.md
 rename validation/deleting/{ => k3s}/defaults/defaults.yaml (100%)
 rename validation/deleting/{rke2k3s => k3s}/delete_cluster_test.go (69%)
 rename validation/deleting/{rke2k3s => k3s}/delete_init_machine_test.go (60%)
 create mode 100644 validation/deleting/k3s/dualstack/defaults/defaults.yaml
 rename validation/deleting/{ => k3s}/dualstack/delete_cluster_test.go (68%)
 rename validation/deleting/{ => k3s}/dualstack/delete_init_machine_test.go (66%)
 create mode 100644 validation/deleting/k3s/dualstack/schemas/hostbusters_schemas.yaml
 create mode 100644 validation/deleting/k3s/ipv6/defaults/defaults.yaml
 rename validation/deleting/{ => k3s}/ipv6/delete_cluster_test.go (62%)
 rename validation/deleting/{ => k3s}/ipv6/delete_init_machine_test.go (54%)
 create mode 100644 validation/deleting/k3s/ipv6/schemas/hostbusters_schemas.yaml
 create mode 100644 validation/deleting/k3s/schemas/hostbusters_schemas.yaml
 create mode 100644 validation/deleting/rke2/README.md
 create mode 100644 validation/deleting/rke2/defaults/defaults.yaml
 create mode 100644 validation/deleting/rke2/delete_cluster_test.go
 create mode 100644 validation/deleting/rke2/delete_init_machine_test.go
 rename validation/deleting/{ => rke2}/dualstack/defaults/defaults.yaml (100%)
 create mode 100644 validation/deleting/rke2/dualstack/delete_cluster_test.go
 create mode 100644 validation/deleting/rke2/dualstack/delete_init_machine_test.go
 create mode 100644 validation/deleting/rke2/dualstack/schemas/hostbusters_schemas.yaml
 rename validation/deleting/{ => rke2}/ipv6/defaults/defaults.yaml (100%)
 create mode 100644 validation/deleting/rke2/ipv6/delete_cluster_test.go
 create mode 100644 validation/deleting/rke2/ipv6/delete_init_machine_test.go
 create mode 100644 validation/deleting/rke2/ipv6/schemas/hostbusters_schemas.yaml
 create mode 100644 validation/deleting/rke2/schemas/hostbusters_schemas.yaml
 delete mode 100644 validation/deleting/rke2k3s/delete_init_machine.go
 delete mode 100644 validation/deleting/rke2k3s/schemas/hostbusters_schemas.yaml

diff --git a/.github/actions/run-hostbusters-dualstack-test-suites/action.yaml b/.github/actions/run-hostbusters-dualstack-test-suites/action.yaml
index f3c9cbf66..7fdbcd2b9 100644
--- a/.github/actions/run-hostbusters-dualstack-test-suites/action.yaml
+++ b/.github/actions/run-hostbusters-dualstack-test-suites/action.yaml
@@ -54,14 +54,29 @@ runs:
         fi
 
         gotestsum --format standard-verbose \
-          --packages=github.com/rancher/tests/validation/deleting/dualstack \
+          --packages=github.com/rancher/tests/validation/deleting/rke2/dualstack \
           --junitfile results.xml \
-          --jsonfile results_delete.json \
+          --jsonfile results_rke2_delete.json \
           -- -timeout=5h -tags=recurring -v
 
-        delete_exit=$?
-        echo "delete_exit=$delete_exit" >> "$GITHUB_ENV"
-        cp results_delete.json results.json
+        rke2_delete_exit=$?
+        echo "rke2_delete_exit=$rke2_delete_exit" >> "$GITHUB_ENV"
+        cp results_rke2_delete.json results.json
+
+        if [[ "${{ inputs.reporting }}" == "true" ]]; then
+          ./validation/pipeline/scripts/build_qase_reporter_v2.sh;
+          ./validation/reporter
+        fi
+
+        gotestsum --format standard-verbose \
+          --packages=github.com/rancher/tests/validation/deleting/k3s/dualstack \
+          --junitfile results.xml \
+          --jsonfile results_k3s_delete.json \
+          -- -timeout=5h -tags=recurring -v
+
+        k3s_delete_exit=$?
+        echo "k3s_delete_exit=$k3s_delete_exit" >> "$GITHUB_ENV"
+        cp results_k3s_delete.json results.json
 
         if [[ "${{ inputs.reporting }}" == "true" ]]; then
           ./validation/pipeline/scripts/build_qase_reporter_v2.sh;
@@ -153,7 +168,8 @@ runs:
         declare -A suites=(
           [k3s_cert_exit]="K3s Cert Rotation:results_k3s_cert.json"
           [rke2_cert_exit]="RKE2 Cert Rotation:results_rke2_cert.json"
-          [delete_exit]="Delete Cluster:results_delete.json"
+          [rke2_delete_exit]="Delete RKE2 Cluster:results_rke2_delete.json"
+          [k3s_delete_exit]="Delete K3S Cluster:results_k3s_delete.json"
           [node_scale_exit]="Node Scaling:results_node_scale.json"
           [prov_exit]="Provisioning:results_prov.json"
           [rke2_snapshot_exit]="RKE2 Snapshot Restore:results_rke2_snapshot.json"
diff --git a/.github/actions/run-hostbusters-ipv6-test-suites/action.yaml b/.github/actions/run-hostbusters-ipv6-test-suites/action.yaml
index e9df6a46a..397356ce5 100644
--- a/.github/actions/run-hostbusters-ipv6-test-suites/action.yaml
+++ b/.github/actions/run-hostbusters-ipv6-test-suites/action.yaml
@@ -54,14 +54,29 @@ runs:
         fi
 
         gotestsum --format standard-verbose \
-          --packages=github.com/rancher/tests/validation/deleting/ipv6 \
+          --packages=github.com/rancher/tests/validation/deleting/rke2/ipv6 \
           --junitfile results.xml \
-          --jsonfile results_delete.json \
+          --jsonfile results_rke2_delete.json \
           -- -timeout=5h -tags=recurring -v
 
-        delete_exit=$?
-        echo "delete_exit=$delete_exit" >> "$GITHUB_ENV"
-        cp results_delete.json results.json
+        rke2_delete_exit=$?
+        echo "rke2_delete_exit=$rke2_delete_exit" >> "$GITHUB_ENV"
+        cp results_rke2_delete.json results.json
+
+        if [[ "${{ inputs.reporting }}" == "true" ]]; then
+          ./validation/pipeline/scripts/build_qase_reporter_v2.sh;
+          ./validation/reporter
+        fi
+
+        gotestsum --format standard-verbose \
+          --packages=github.com/rancher/tests/validation/deleting/k3s/ipv6 \
+          --junitfile results.xml \
+          --jsonfile results_k3s_delete.json \
+          -- -timeout=5h -tags=recurring -v
+
+        k3s_delete_exit=$?
+        echo "k3s_delete_exit=$k3s_delete_exit" >> "$GITHUB_ENV"
+        cp results_k3s_delete.json results.json
 
         if [[ "${{ inputs.reporting }}" == "true" ]]; then
          ./validation/pipeline/scripts/build_qase_reporter_v2.sh;
@@ -153,7 +168,8 @@ runs:
         declare -A suites=(
           [k3s_cert_exit]="K3s Cert Rotation:results_k3s_cert.json"
           [rke2_cert_exit]="RKE2 Cert Rotation:results_rke2_cert.json"
-          [delete_exit]="Delete Cluster:results_delete.json"
+          [rke2_delete_exit]="Delete RKE2 Cluster:results_rke2_delete.json"
+          [k3s_delete_exit]="Delete K3S Cluster:results_k3s_delete.json"
           [node_scale_exit]="Node Scaling:results_node_scale.json"
           [prov_exit]="Provisioning:results_prov.json"
           [rke2_snapshot_exit]="RKE2 Snapshot Restore:results_rke2_snapshot.json"

diff --git a/.github/actions/run-hostbusters-test-suites/action.yaml b/.github/actions/run-hostbusters-test-suites/action.yaml
index f3fcaaf49..04700b9be 100644
--- a/.github/actions/run-hostbusters-test-suites/action.yaml
+++ b/.github/actions/run-hostbusters-test-suites/action.yaml
@@ -54,14 +54,29 @@ runs:
         fi
 
         gotestsum --format standard-verbose \
-          --packages=github.com/rancher/tests/validation/deleting/rke2k3s \
+          --packages=github.com/rancher/tests/validation/deleting/rke2 \
           --junitfile results.xml \
-          --jsonfile results_delete.json \
+          --jsonfile results_rke2_delete.json \
           -- -timeout=5h -tags=recurring -v
 
-        delete_exit=$?
-        echo "delete_exit=$delete_exit" >> "$GITHUB_ENV"
-        cp results_delete.json results.json
+        rke2_delete_exit=$?
+        echo "rke2_delete_exit=$rke2_delete_exit" >> "$GITHUB_ENV"
+        cp results_rke2_delete.json results.json
+
+        if [[ "${{ inputs.reporting }}" == "true" ]]; then
+          ./validation/pipeline/scripts/build_qase_reporter_v2.sh;
+          ./validation/reporter
+        fi
+
+        gotestsum --format standard-verbose \
+          --packages=github.com/rancher/tests/validation/deleting/k3s \
+          --junitfile results.xml \
+          --jsonfile results_k3s_delete.json \
+          -- -timeout=5h -tags=recurring -v
+
+        k3s_delete_exit=$?
+        echo "k3s_delete_exit=$k3s_delete_exit" >> "$GITHUB_ENV"
+        cp results_k3s_delete.json results.json
 
         if [[ "${{ inputs.reporting }}" == "true" ]]; then
           ./validation/pipeline/scripts/build_qase_reporter_v2.sh;
@@ -168,7 +183,8 @@ runs:
         declare -A suites=(
           [rke2_cert_exit]="RKE2 Cert Rotation:results_rke2_cert.json"
           [k3s_cert_exit]="K3S Cert Rotation:results_k3s_cert.json"
-          [delete_exit]="Delete Cluster:results_delete.json"
+          [rke2_delete_exit]="Delete RKE2 Cluster:results_rke2_delete.json"
+          [k3s_delete_exit]="Delete K3S Cluster:results_k3s_delete.json"
          [nodescaling_exit]="Node Scaling:results_node_scale.json"
          [k3s_exit]="K3S:results_k3s.json"
          [rke2_exit]="RKE2:results_rke2.json"
diff --git a/actions/clusters/clusters.go b/actions/clusters/clusters.go
index da8e90df6..c712bd01e 100644
--- a/actions/clusters/clusters.go
+++ b/actions/clusters/clusters.go
@@ -14,7 +14,10 @@ import (
 	steveV1 "github.com/rancher/shepherd/clients/rancher/v1"
 	v1 "github.com/rancher/shepherd/clients/rancher/v1"
 	"github.com/rancher/shepherd/extensions/clusters"
+	"github.com/rancher/shepherd/extensions/defaults"
 	"github.com/rancher/shepherd/extensions/defaults/stevetypes"
+	"github.com/rancher/shepherd/extensions/steve"
+	"github.com/rancher/tests/actions/machinepools"
 	"github.com/sirupsen/logrus"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -25,11 +28,9 @@ const (
 	baseline              = "baseline"
 	externalAws           = "external-aws"
 	protectKernelDefaults = "protect-kernel-defaults"
-
-	localcluster      = "fleet-local/local"
-	rancherRestricted = "rancher-restricted"
-	rke1HardenedGID   = 52034
-	rke1HardenedUID   = 52034
+	rancherRestricted     = "rancher-restricted"
+	rke1HardenedGID       = 52034
+	rke1HardenedUID       = 52034
 )
 
 // CreateRancherBaselinePSACT creates custom PSACT called rancher-baseline which sets each PSS to baseline.
@@ -950,3 +951,30 @@ func DeletePSACT(client *rancher.Client, psactID string) error {
 
 	return nil
 }
+
+// DeleteInitMachine deletes the init machine from the specified cluster.
+func DeleteInitMachine(client *rancher.Client, clusterID string) error {
+	initMachine, err := machinepools.GetInitMachine(client, clusterID)
+	if err != nil {
+		return err
+	}
+
+	err = client.Steve.SteveType(stevetypes.Machine).Delete(initMachine)
+	if err != nil {
+		return err
+	}
+
+	logrus.Debugf("Waiting for the init machine to be deleted on cluster (%s)", clusterID)
+	err = steve.WaitForResourceDeletion(client.Steve, initMachine, defaults.FiveHundredMillisecondTimeout, defaults.TenMinuteTimeout)
+	if err != nil {
+		return err
+	}
+
+	logrus.Debugf("Waiting for the init machine to be replaced on cluster (%s)", clusterID)
+	err = clusters.WatchAndWaitForCluster(client, clusterID)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
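The `DeleteInitMachine` helper above replaces the package-local copy that previously lived in `validation/deleting/rke2k3s` (deleted at the bottom of this patch); the reworked suites now reach it through `actions/clusters`. A minimal usage sketch outside a test suite, assuming a Rancher host and token resolved from the cattle config; the cluster ID shown is a placeholder, and in the suites it comes from the provisioning Steve object:

```go
// Sketch only: calls the relocated helper against a placeholder cluster.
package main

import (
	"github.com/rancher/shepherd/clients/rancher"
	"github.com/rancher/shepherd/pkg/session"
	"github.com/rancher/tests/actions/clusters"
	"github.com/sirupsen/logrus"
)

func main() {
	testSession := session.NewSession()
	defer testSession.Cleanup()

	// Host and admin token are read from the cattle config, as in the suites.
	client, err := rancher.NewClient("", testSession)
	if err != nil {
		logrus.Fatalf("failed to build rancher client: %v", err)
	}

	// Deletes the init machine, waits for the resource to disappear, then
	// waits for the cluster to come back up with a replacement init node.
	// "fleet-default/my-cluster" is a hypothetical provisioning cluster ID.
	if err := clusters.DeleteInitMachine(client, "fleet-default/my-cluster"); err != nil {
		logrus.Fatalf("init machine deletion failed: %v", err)
	}
}
```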
diff --git a/validation/certificates/k3s/README.md b/validation/certificates/k3s/README.md
index 4401df497..eebf9ada2 100644
--- a/validation/certificates/k3s/README.md
+++ b/validation/certificates/k3s/README.md
@@ -73,7 +73,7 @@ rancher:
 ```
 
 ### Provisioning cluster
-This test will create a cluster if one is not provided, see to configure a node driver OR custom cluster depending on the snapshot test [k3s provisioning](../../provisioning/k3s/README.md)
+This test will create a cluster if one is not provided; to configure a node driver OR custom cluster for the certificate test, see [k3s provisioning](../../provisioning/k3s/README.md)
 
 ## Defaults
 This package contains a defaults folder which contains default test configuration data for non-sensitive fields. The goal of this data is to:

diff --git a/validation/certificates/k3s/dualstack/defaults/defaults.yaml b/validation/certificates/k3s/dualstack/defaults/defaults.yaml
index 6b75d274e..97d90faee 100644
--- a/validation/certificates/k3s/dualstack/defaults/defaults.yaml
+++ b/validation/certificates/k3s/dualstack/defaults/defaults.yaml
@@ -4,6 +4,9 @@ clusterConfig:
     clusterCIDR: ""
     serviceCIDR: ""
     stackPreference: ""
+  advanced:
+    machineGlobalConfig:
+      flannel-ipv6-masq: true
 
 awsMachineConfigs:
   awsMachineConfig:

diff --git a/validation/certificates/rke2/README.md b/validation/certificates/rke2/README.md
index 7c756113b..f441a0473 100644
--- a/validation/certificates/rke2/README.md
+++ b/validation/certificates/rke2/README.md
@@ -91,7 +91,7 @@ rancher:
 ```
 
 ### Provisioning cluster
-This test will create a cluster if one is not provided, see to configure a node driver OR custom cluster depending on the snapshot test [rke2 provisioning](../../provisioning/rke2/README.md)
+This test will create a cluster if one is not provided; to configure a node driver OR custom cluster for the certificate test, see [rke2 provisioning](../../provisioning/rke2/README.md)
 
 ## Defaults
 This package contains a defaults folder which contains default test configuration data for non-sensitive fields. The goal of this data is to:
diff --git a/validation/deleting/dualstack/schemas/hostbusters_schemas.yaml b/validation/deleting/dualstack/schemas/hostbusters_schemas.yaml
deleted file mode 100644
index 66a18ca81..000000000
--- a/validation/deleting/dualstack/schemas/hostbusters_schemas.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-- suite: Go Automation/Deleting
-  projects: [RRT, RM]
-  cases:
-    - description: Deletes an existing cluster
-      title: RKE2_Delete_Dualstack_Cluster
-      priority: 4
-      type: 8
-      is_flaky: 0
-      automation: 2
-      steps:
-        - action: Delete the existing cluster
-          expectedresult: ""
-          data: ""
-          position: 1
-          attachments: []
-        - action: Verify cluster is deleted
-          expectedresult: ""
-          data: ""
-          position: 2
-          attachments: []
-      custom_field:
-        "14": Validation
-        "18": Hostbusters
-
-    - description: Deletes an existing cluster
-      title: K3S_Delete_Dualstack_Cluster
-      priority: 4
-      type: 8
-      is_flaky: 0
-      automation: 2
-      steps:
-        - action: Delete the existing cluster
-          expectedresult: ""
-          data: ""
-          position: 1
-          attachments: []
-        - action: Verify cluster is deleted
-          expectedresult: ""
-          data: ""
-          position: 2
-          attachments: []
-      custom_field:
-        "14": Validation
-        "18": Hostbusters
-
-    - description: Deletes the init machine on an existing cluster
-      title: RKE2_Dualstack_Delete_Init_Machine
-      priority: 4
-      type: 8
-      is_flaky: 0
-      automation: 2
-      steps:
-        - action: Delete the init machine on an existing cluster
-          expectedresult: ""
-          data: ""
-          position: 1
-          attachments: []
-        - action: Verify cluster state
-          expectedresult: ""
-          data: ""
-          position: 2
-          attachments: []
-      custom_field:
-        "14": Validation
-        "18": Hostbusters
-
-    - description: Deletes the init machine on an existing cluster
-      title: K3S_Dualstack_Delete_Init_Machine
-      priority: 4
-      type: 8
-      is_flaky: 0
-      automation: 2
-      steps:
-        - action: Delete the init machine on an existing cluster
-          expectedresult: ""
-          data: ""
-          position: 1
-          attachments: []
-        - action: Verify cluster state
-          expectedresult: ""
-          data: ""
-          position: 2
-          attachments: []
-      custom_field:
-        "14": Validation
-        "18": Hostbusters
\ No newline at end of file

diff --git a/validation/deleting/ipv6/schemas/hostbusters_schemas.yaml b/validation/deleting/ipv6/schemas/hostbusters_schemas.yaml
deleted file mode 100644
index 38f3c2385..000000000
--- a/validation/deleting/ipv6/schemas/hostbusters_schemas.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-- suite: Go Automation/Deleting
-  projects: [RRT, RM]
-  cases:
-    - description: Deletes an existing cluster
-      title: RKE2_Delete_IPv6_Cluster
-      priority: 4
-      type: 8
-      is_flaky: 0
-      automation: 2
-      steps:
-        - action: Delete the existing cluster
-          expectedresult: ""
-          data: ""
-          position: 1
-          attachments: []
-        - action: Verify cluster is deleted
-          expectedresult: ""
-          data: ""
-          position: 2
-          attachments: []
-      custom_field:
-        "14": Validation
-        "18": Hostbusters
-
-    - description: Deletes an existing cluster
-      title: K3S_Delete_IPv6_Cluster
-      priority: 4
-      type: 8
-      is_flaky: 0
-      automation: 2
-      steps:
-        - action: Delete the existing cluster
-          expectedresult: ""
-          data: ""
-          position: 1
-          attachments: []
-        - action: Verify cluster is deleted
-          expectedresult: ""
-          data: ""
-          position: 2
-          attachments: []
-      custom_field:
-        "14": Validation
-        "18": Hostbusters
-
-    - description: Deletes the init machine on an existing cluster
-      title: RKE2_IPv6_Delete_Init_Machine
-      priority: 4
-      type: 8
-      is_flaky: 0
-      automation: 2
-      steps:
-        - action: Delete the init machine on an existing cluster
-          expectedresult: ""
-          data: ""
-          position: 1
-          attachments: []
-        - action: Verify cluster state
-          expectedresult: ""
-          data: ""
-          position: 2
-          attachments: []
-      custom_field:
-        "14": Validation
-        "18": Hostbusters
-
-    - description: Deletes the init machine on an existing cluster
-      title: K3S_IPv6_Delete_Init_Machine
-      priority: 4
-      type: 8
-      is_flaky: 0
-      automation: 2
-      steps:
-        - action: Delete the init machine on an existing cluster
-          expectedresult: ""
-          data: ""
-          position: 1
-          attachments: []
-        - action: Verify cluster state
-          expectedresult: ""
-          data: ""
-          position: 2
-          attachments: []
-      custom_field:
-        "14": Validation
-        "18": Hostbusters
\ No newline at end of file
diff --git a/validation/deleting/k3s/README.md b/validation/deleting/k3s/README.md
new file mode 100644
index 000000000..12faa711a
--- /dev/null
+++ b/validation/deleting/k3s/README.md
@@ -0,0 +1,148 @@
+# K3S Deleting Configs
+
+## Table of Contents
+1. [Prerequisites](../README.md)
+2. [Test Cases](#Test-Cases)
+3. [Configurations](#Configurations)
+4. [Configuration Defaults](#defaults)
+5. [Logging Levels](#Logging)
+6. [Back to general deleting](../README.md)
+
+## Test Cases
+All of the test cases in this package are listed below. Keep in mind that all configuration for these tests has built-in defaults ([Configuration Defaults](#defaults)). These tests will provision a cluster if one is not provided via the rancher.ClusterName field.
+
+### Delete cluster test
+
+#### Description:
+Verifies that a cluster can be deleted.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `K3S_Delete_Cluster`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/deleting/k3s --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestDeleteClusterTestSuite/TestDeletingCluster -timeout=1h -v`
+
+
+### Delete cluster init machine test
+
+#### Description:
+Verifies that a cluster is able to recover from deleting the init machine.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `K3S_Delete_Init_Machine`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/deleting/k3s --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestDeleteInitMachineTestSuite/TestDeleteInitMachine -timeout=1h -v`
+
+
+### Delete IPv6 cluster test
+
+#### Description:
+Verifies that an IPv6 cluster can be deleted.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `K3S_Delete_IPv6_Cluster`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/deleting/k3s/ipv6 --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestDeleteIPv6ClusterTestSuite/TestDeletingIPv6Cluster -timeout=1h -v`
+
+
+### Delete IPv6 cluster init machine test
+
+#### Description:
+Verifies that an IPv6 cluster is able to recover from deleting the init machine.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `K3S_IPv6_Delete_Init_Machine`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/deleting/k3s/ipv6 --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestDeleteInitMachineIPv6TestSuite/TestDeleteInitMachineIPv6 -timeout=1h -v`
+
+
+### Delete Dualstack cluster test
+
+#### Description:
+Verifies that a dualstack cluster can be deleted.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `K3S_Delete_Dualstack_Cluster`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/deleting/k3s/dualstack --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestDeleteDualstackClusterTestSuite/TestDeletingDualstackCluster -timeout=1h -v`
+
+
+### Delete Dualstack cluster init machine test
+
+#### Description:
+Verifies that a dualstack cluster is able to recover from deleting the init machine.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `K3S_Dualstack_Delete_Init_Machine`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/deleting/k3s/dualstack --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestDeleteInitMachineDualstackTestSuite/TestDeleteInitMachineDualstack -timeout=1h -v`
+
+## Configurations
+
+### Existing cluster:
+```yaml
+rancher:
+  host:
+  adminToken:
+  clusterName: ""
+  cleanup: true
+  insecure: true
+```
+
+### Provisioning cluster
+This test will create a cluster if one is not provided; to configure a node driver OR custom cluster for the deleting test, see [k3s provisioning](../../provisioning/k3s/README.md)
+
+## Defaults
+This package contains a defaults folder which contains default test configuration data for non-sensitive fields. The goal of this data is to:
+1. Reduce the number of fields the user needs to provide in the cattle_config file.
+2. Reduce the amount of yaml data that needs to be stored in our pipelines.
+3. Make it easier to run tests.
+
+Any data the user provides will override these defaults, which are stored here: [defaults](defaults/defaults.yaml).
+
+## Logging
+This package supports several logging levels. You can set the logging level via the cattle config; all levels above the provided level will be logged, while all logs below it will be omitted.
+
+```yaml
+logging:
+  level: "trace" #trace, debug, info, warning, error
+```
+
+## Additional
+1. If the tests pass immediately without warning, try adding `-count=1` or running `go clean -cache`. This will keep previous results from interfering with the new test run.
+2. All of the tests utilize parallelism when running. For finer control over how things run in parallel, use the `-p` and `-parallel` flags.
\ No newline at end of file

diff --git a/validation/deleting/defaults/defaults.yaml b/validation/deleting/k3s/defaults/defaults.yaml
similarity index 100%
rename from validation/deleting/defaults/defaults.yaml
rename to validation/deleting/k3s/defaults/defaults.yaml
diff --git a/validation/deleting/rke2k3s/delete_cluster_test.go b/validation/deleting/k3s/delete_cluster_test.go
similarity index 69%
rename from validation/deleting/rke2k3s/delete_cluster_test.go
rename to validation/deleting/k3s/delete_cluster_test.go
index d56ba0662..4f4b9ac30 100644
--- a/validation/deleting/rke2k3s/delete_cluster_test.go
+++ b/validation/deleting/k3s/delete_cluster_test.go
@@ -1,6 +1,6 @@
 //go:build (infra.rke2k3s || validation || recurring) && !infra.any && !infra.aks && !infra.eks && !infra.gke && !infra.rke1 && !stress && !sanity && !extended
 
-package rke2k3s
+package k3s
 
 import (
 	"os"
@@ -9,6 +9,7 @@ import (
 	"github.com/rancher/shepherd/clients/rancher"
 	v1 "github.com/rancher/shepherd/clients/rancher/v1"
 	extClusters "github.com/rancher/shepherd/extensions/clusters"
+	"github.com/rancher/shepherd/extensions/defaults/stevetypes"
 	"github.com/rancher/shepherd/pkg/config"
 	"github.com/rancher/shepherd/pkg/config/operations"
 	"github.com/rancher/shepherd/pkg/session"
@@ -29,8 +30,7 @@ type DeleteClusterTestSuite struct {
 	suite.Suite
 	client       *rancher.Client
 	session      *session.Session
 	cattleConfig map[string]any
-	rke2Cluster  *v1.SteveAPIObject
-	k3sCluster   *v1.SteveAPIObject
+	cluster      *v1.SteveAPIObject
 }
 
 func (d *DeleteClusterTestSuite) TearDownSuite() {
@@ -63,34 +63,38 @@ func (d *DeleteClusterTestSuite) SetupSuite() {
 	clusterConfig := new(clusters.ClusterConfig)
 	operations.LoadObjectFromMap(defaults.ClusterConfigKey, d.cattleConfig, clusterConfig)
 
-	provider := provisioning.CreateProvider(clusterConfig.Provider)
-	machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig)
+	rancherConfig := new(rancher.Config)
+	operations.LoadObjectFromMap(defaults.RancherConfigKey, d.cattleConfig, rancherConfig)
 
-	logrus.Info("Provisioning RKE2 cluster")
-	d.rke2Cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false)
-	require.NoError(d.T(), err)
+	if rancherConfig.ClusterName == "" {
+		provider := provisioning.CreateProvider(clusterConfig.Provider)
+		machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig)
 
-	logrus.Info("Provisioning K3S cluster")
-	d.k3sCluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false)
-	require.NoError(d.T(), err)
+		logrus.Info("Provisioning K3S cluster")
+		d.cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, defaults.K3S, provider, *clusterConfig, machineConfigSpec, nil, true, false)
+		require.NoError(d.T(), err)
+	} else {
+		logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName)
+		d.cluster, err = d.client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + rancherConfig.ClusterName)
+		require.NoError(d.T(), err)
+	}
 }
 
 func (d *DeleteClusterTestSuite) TestDeletingCluster() {
 	tests := []struct {
-		name      string
-		clusterID string
+		name    string
+		cluster *v1.SteveAPIObject
 	}{
-		{"RKE2_Delete_Cluster", d.rke2Cluster.ID},
-		{"K3S_Delete_Cluster", d.k3sCluster.ID},
+		{"K3S_Delete_Cluster", d.cluster},
 	}
 
 	for _, tt := range tests {
 		d.Run(tt.name, func() {
-			logrus.Infof("Deleting cluster (%s)", tt.clusterID)
-			extClusters.DeleteK3SRKE2Cluster(d.client, tt.clusterID)
+			logrus.Infof("Deleting cluster (%s)", tt.cluster.ID)
+			extClusters.DeleteK3SRKE2Cluster(d.client, tt.cluster.ID)
 
-			logrus.Infof("Verifying cluster (%s) deletion", tt.clusterID)
-			provisioning.VerifyDeleteRKE2K3SCluster(d.T(), d.client, tt.clusterID)
+			logrus.Infof("Verifying cluster (%s) deletion", tt.cluster.ID)
+			provisioning.VerifyDeleteRKE2K3SCluster(d.T(), d.client, tt.cluster.ID)
 		})
 
 		params := provisioning.GetProvisioningSchemaParams(d.client, d.cattleConfig)
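The hunk above is the template every reworked suite in this patch follows: if `rancher.clusterName` is unset, SetupSuite provisions a throwaway cluster; otherwise it resolves the named cluster through the Steve provisioning endpoint. A condensed sketch of just that branch, with names taken from the diff (`defaults.K3S` and the `fleet-default` namespace prefix included):

```go
// Provision-or-reuse branch shared by the reworked suites. rancherConfig
// and clusterConfig are loaded from the cattle config map exactly as in
// SetupSuite above; err is checked once after the branch.
if rancherConfig.ClusterName == "" {
	// No cluster supplied: provision one using the package defaults.
	provider := provisioning.CreateProvider(clusterConfig.Provider)
	machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig)

	d.cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient,
		defaults.K3S, provider, *clusterConfig, machineConfigSpec, nil, true, false)
} else {
	// Reuse the named cluster; provisioning objects live under fleet-default.
	d.cluster, err = d.client.Steve.SteveType(stevetypes.Provisioning).
		ByID("fleet-default/" + rancherConfig.ClusterName)
}
require.NoError(d.T(), err)
```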
diff --git a/validation/deleting/rke2k3s/delete_init_machine_test.go b/validation/deleting/k3s/delete_init_machine_test.go
similarity index 60%
rename from validation/deleting/rke2k3s/delete_init_machine_test.go
rename to validation/deleting/k3s/delete_init_machine_test.go
index a0f8cbeaa..cdb984be2 100644
--- a/validation/deleting/rke2k3s/delete_init_machine_test.go
+++ b/validation/deleting/k3s/delete_init_machine_test.go
@@ -1,6 +1,6 @@
 //go:build (infra.rke2k3s || validation || recurring) && !infra.any && !infra.aks && !infra.eks && !infra.gke && !infra.rke1 && !stress && !sanity && !extended
 
-package rke2k3s
+package k3s
 
 import (
 	"os"
@@ -8,7 +8,6 @@ import (
 
 	"github.com/rancher/shepherd/clients/rancher"
 	v1 "github.com/rancher/shepherd/clients/rancher/v1"
-	extClusters "github.com/rancher/shepherd/extensions/clusters"
 	"github.com/rancher/shepherd/extensions/defaults/stevetypes"
 	"github.com/rancher/shepherd/pkg/config"
 	"github.com/rancher/shepherd/pkg/config/operations"
@@ -33,8 +32,7 @@ type DeleteInitMachineTestSuite struct {
 	client       *rancher.Client
 	session      *session.Session
 	cattleConfig map[string]any
-	rke2Cluster  *v1.SteveAPIObject
-	k3sCluster   *v1.SteveAPIObject
+	cluster      *v1.SteveAPIObject
 }
 
 func (d *DeleteInitMachineTestSuite) TearDownSuite() {
@@ -67,52 +65,54 @@ func (d *DeleteInitMachineTestSuite) SetupSuite() {
 	clusterConfig := new(clusters.ClusterConfig)
 	operations.LoadObjectFromMap(defaults.ClusterConfigKey, d.cattleConfig, clusterConfig)
 
-	nodeRolesStandard := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool}
+	rancherConfig := new(rancher.Config)
+	operations.LoadObjectFromMap(defaults.RancherConfigKey, d.cattleConfig, rancherConfig)
 
-	nodeRolesStandard[0].MachinePoolConfig.Quantity = 3
-	nodeRolesStandard[1].MachinePoolConfig.Quantity = 2
-	nodeRolesStandard[2].MachinePoolConfig.Quantity = 3
-	clusterConfig.MachinePools = nodeRolesStandard
+	if rancherConfig.ClusterName == "" {
+		nodeRolesStandard := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool}
 
-	provider := provisioning.CreateProvider(clusterConfig.Provider)
-	machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig)
+		nodeRolesStandard[0].MachinePoolConfig.Quantity = 3
+		nodeRolesStandard[1].MachinePoolConfig.Quantity = 2
+		nodeRolesStandard[2].MachinePoolConfig.Quantity = 3
+		clusterConfig.MachinePools = nodeRolesStandard
 
-	logrus.Info("Provisioning RKE2 cluster")
-	d.rke2Cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false)
-	require.NoError(d.T(), err)
+		provider := provisioning.CreateProvider(clusterConfig.Provider)
+		machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig)
 
-	logrus.Info("Provisioning K3S cluster")
-	d.k3sCluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false)
-	require.NoError(d.T(), err)
+		logrus.Info("Provisioning K3S cluster")
+		d.cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, defaults.K3S, provider, *clusterConfig, machineConfigSpec, nil, true, false)
+		require.NoError(d.T(), err)
+	} else {
+		logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName)
+		d.cluster, err = d.client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + rancherConfig.ClusterName)
+		require.NoError(d.T(), err)
+	}
 }
 
 func (d *DeleteInitMachineTestSuite) TestDeleteInitMachine() {
 	tests := []struct {
-		name      string
-		clusterID string
+		name    string
+		cluster *v1.SteveAPIObject
 	}{
-		{"RKE2_Delete_Init_Machine", d.rke2Cluster.ID},
-		{"K3S_Delete_Init_Machine", d.k3sCluster.ID},
+		{"K3S_Delete_Init_Machine", d.cluster},
 	}
 
 	for _, tt := range tests {
-		cluster, err := d.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.clusterID)
-		require.NoError(d.T(), err)
-
+		var err error
 		d.Run(tt.name, func() {
-			logrus.Infof("Deleting init machine on cluster (%s)", cluster.Name)
-			err := DeleteInitMachine(d.client, tt.clusterID)
+			logrus.Infof("Deleting init machine on cluster (%s)", tt.cluster.Name)
+			err := clusters.DeleteInitMachine(d.client, tt.cluster.ID)
 			require.NoError(d.T(), err)
 
-			logrus.Infof("Verifying the cluster is ready (%s)", cluster.Name)
-			provisioning.VerifyClusterReady(d.T(), d.client, cluster)
+			logrus.Infof("Verifying the cluster is ready (%s)", tt.cluster.Name)
+			provisioning.VerifyClusterReady(d.T(), d.client, tt.cluster)
 
-			logrus.Infof("Verifying cluster deployments (%s)", cluster.Name)
-			err = deployment.VerifyClusterDeployments(d.client, cluster)
+			logrus.Infof("Verifying cluster deployments (%s)", tt.cluster.Name)
+			err = deployment.VerifyClusterDeployments(d.client, tt.cluster)
 			require.NoError(d.T(), err)
 
-			logrus.Infof("Verifying cluster pods (%s)", cluster.Name)
-			err = pods.VerifyClusterPods(d.client, cluster)
+			logrus.Infof("Verifying cluster pods (%s)", tt.cluster.Name)
+			err = pods.VerifyClusterPods(d.client, tt.cluster)
 			require.NoError(d.T(), err)
 		})
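Worth noting about the hunk above: the init-machine suites size the machine pools to 3 etcd, 2 control plane, and 3 worker nodes before provisioning. The likely reason, not stated in the patch, is that the init node is also an etcd member, so the cluster needs surviving etcd members to recover once that machine is deleted and replaced. The sizing as it appears in SetupSuite:

```go
// Pool sizing used by the delete-init-machine suites (taken from the diff).
// Three etcd members keep the cluster recoverable while the init node is
// deleted and replaced; the exact quantities are the patch's choice.
nodeRolesStandard := []provisioninginput.MachinePools{
	provisioninginput.EtcdMachinePool,
	provisioninginput.ControlPlaneMachinePool,
	provisioninginput.WorkerMachinePool,
}

nodeRolesStandard[0].MachinePoolConfig.Quantity = 3 // etcd
nodeRolesStandard[1].MachinePoolConfig.Quantity = 2 // control plane
nodeRolesStandard[2].MachinePoolConfig.Quantity = 3 // worker
clusterConfig.MachinePools = nodeRolesStandard
```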
"github.com/rancher/shepherd/extensions/defaults/stevetypes" "github.com/rancher/shepherd/pkg/config" "github.com/rancher/shepherd/pkg/config/operations" "github.com/rancher/shepherd/pkg/session" @@ -29,8 +30,7 @@ type DeleteDualstackClusterTestSuite struct { session *session.Session client *rancher.Client cattleConfig map[string]any - rke2Cluster *v1.SteveAPIObject - k3sCluster *v1.SteveAPIObject + cluster *v1.SteveAPIObject } func (d *DeleteDualstackClusterTestSuite) TearDownSuite() { @@ -63,34 +63,38 @@ func (d *DeleteDualstackClusterTestSuite) SetupSuite() { clusterConfig := new(clusters.ClusterConfig) operations.LoadObjectFromMap(defaults.ClusterConfigKey, d.cattleConfig, clusterConfig) - provider := provisioning.CreateProvider(clusterConfig.Provider) - machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig) + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, d.cattleConfig, rancherConfig) - logrus.Info("Provisioning RKE2 cluster") - d.rke2Cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false) - require.NoError(d.T(), err) + if rancherConfig.ClusterName == "" { + provider := provisioning.CreateProvider(clusterConfig.Provider) + machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig) - logrus.Info("Provisioning K3S cluster") - d.k3sCluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false) - require.NoError(d.T(), err) + logrus.Info("Provisioning K3S cluster") + d.cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, defaults.K3S, provider, *clusterConfig, machineConfigSpec, nil, true, false) + require.NoError(d.T(), err) + } else { + logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName) + d.cluster, err = d.client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + rancherConfig.ClusterName) + require.NoError(d.T(), err) + } } func (d *DeleteDualstackClusterTestSuite) TestDeletingDualstackCluster() { tests := []struct { - name string - clusterID string + name string + cluster *v1.SteveAPIObject }{ - {"RKE2_Delete_Dualstack_Cluster", d.rke2Cluster.ID}, - {"K3S_Delete_Dualstack_Cluster", d.k3sCluster.ID}, + {"K3S_Delete_Dualstack_Cluster", d.cluster}, } for _, tt := range tests { d.Run(tt.name, func() { - logrus.Infof("Deleting cluster (%s)", tt.clusterID) - extClusters.DeleteK3SRKE2Cluster(d.client, tt.clusterID) + logrus.Infof("Deleting cluster (%s)", tt.cluster.Name) + extClusters.DeleteK3SRKE2Cluster(d.client, tt.cluster.ID) - logrus.Infof("Verifying cluster (%s) deletion", tt.clusterID) - provisioning.VerifyDeleteRKE2K3SCluster(d.T(), d.client, tt.clusterID) + logrus.Infof("Verifying cluster (%s) deletion", tt.cluster.ID) + provisioning.VerifyDeleteRKE2K3SCluster(d.T(), d.client, tt.cluster.ID) }) params := provisioning.GetProvisioningSchemaParams(d.client, d.cattleConfig) diff --git a/validation/deleting/dualstack/delete_init_machine_test.go b/validation/deleting/k3s/dualstack/delete_init_machine_test.go similarity index 66% rename from validation/deleting/dualstack/delete_init_machine_test.go rename to validation/deleting/k3s/dualstack/delete_init_machine_test.go index ed0e1a407..b51838a9b 100644 --- a/validation/deleting/dualstack/delete_init_machine_test.go +++ b/validation/deleting/k3s/dualstack/delete_init_machine_test.go @@ 
@@ -8,7 +8,6 @@ import (
 
 	"github.com/rancher/shepherd/clients/rancher"
 	v1 "github.com/rancher/shepherd/clients/rancher/v1"
-	extClusters "github.com/rancher/shepherd/extensions/clusters"
 	"github.com/rancher/shepherd/extensions/defaults/stevetypes"
 	"github.com/rancher/shepherd/pkg/config"
 	"github.com/rancher/shepherd/pkg/config/operations"
@@ -21,7 +20,6 @@ import (
 	"github.com/rancher/tests/actions/qase"
 	"github.com/rancher/tests/actions/workloads/deployment"
 	"github.com/rancher/tests/actions/workloads/pods"
-	"github.com/rancher/tests/validation/deleting/rke2k3s"
 	resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster"
 	standard "github.com/rancher/tests/validation/provisioning/resources/standarduser"
 	"github.com/sirupsen/logrus"
@@ -34,8 +32,7 @@ type DeleteInitMachineDualstackTestSuite struct {
 	client       *rancher.Client
 	session      *session.Session
 	cattleConfig map[string]any
-	rke2Cluster  *v1.SteveAPIObject
-	k3sCluster   *v1.SteveAPIObject
+	cluster      *v1.SteveAPIObject
 }
 
 func (d *DeleteInitMachineDualstackTestSuite) TearDownSuite() {
@@ -68,6 +65,9 @@ func (d *DeleteInitMachineDualstackTestSuite) SetupSuite() {
 	clusterConfig := new(clusters.ClusterConfig)
 	operations.LoadObjectFromMap(defaults.ClusterConfigKey, d.cattleConfig, clusterConfig)
 
+	rancherConfig := new(rancher.Config)
+	operations.LoadObjectFromMap(defaults.RancherConfigKey, d.cattleConfig, rancherConfig)
+
 	nodeRolesStandard := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool}
 
 	nodeRolesStandard[0].MachinePoolConfig.Quantity = 3
@@ -75,45 +75,44 @@ func (d *DeleteInitMachineDualstackTestSuite) SetupSuite() {
 	nodeRolesStandard[1].MachinePoolConfig.Quantity = 2
 	nodeRolesStandard[2].MachinePoolConfig.Quantity = 3
 	clusterConfig.MachinePools = nodeRolesStandard
 
-	provider := provisioning.CreateProvider(clusterConfig.Provider)
-	machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig)
-
-	logrus.Info("Provisioning RKE2 cluster")
-	d.rke2Cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false)
-	require.NoError(d.T(), err)
+	if rancherConfig.ClusterName == "" {
+		provider := provisioning.CreateProvider(clusterConfig.Provider)
+		machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig)
 
-	logrus.Info("Provisioning K3s cluster")
-	d.k3sCluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false)
-	require.NoError(d.T(), err)
+		logrus.Info("Provisioning K3S cluster")
+		d.cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, defaults.K3S, provider, *clusterConfig, machineConfigSpec, nil, true, false)
+		require.NoError(d.T(), err)
+	} else {
+		logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName)
+		d.cluster, err = d.client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + rancherConfig.ClusterName)
+		require.NoError(d.T(), err)
+	}
 }
 
 func (d *DeleteInitMachineDualstackTestSuite) TestDeleteInitMachineDualstack() {
 	tests := []struct {
-		name      string
-		clusterID string
+		name    string
+		cluster *v1.SteveAPIObject
 	}{
-		{"RKE2_Dualstack_Delete_Init_Machine", d.rke2Cluster.ID},
-		{"K3S_Dualstack_Delete_Init_Machine", d.k3sCluster.ID},
+		{"K3S_Dualstack_Delete_Init_Machine", d.cluster},
 	}
 
 	for _, tt := range tests {
-		cluster, err := d.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.clusterID)
-		require.NoError(d.T(), err)
-
+		var err error
 		d.Run(tt.name, func() {
-			logrus.Infof("Deleting init machine on cluster (%s)", cluster.Name)
-			err := rke2k3s.DeleteInitMachine(d.client, tt.clusterID)
+			logrus.Infof("Deleting init machine on cluster (%s)", tt.cluster.Name)
+			err := clusters.DeleteInitMachine(d.client, tt.cluster.ID)
 			require.NoError(d.T(), err)
 
-			logrus.Infof("Verifying the cluster is ready (%s)", cluster.Name)
-			provisioning.VerifyClusterReady(d.T(), d.client, cluster)
+			logrus.Infof("Verifying the cluster is ready (%s)", tt.cluster.Name)
+			provisioning.VerifyClusterReady(d.T(), d.client, tt.cluster)
 
-			logrus.Infof("Verifying cluster deployments (%s)", cluster.Name)
-			err = deployment.VerifyClusterDeployments(d.client, cluster)
+			logrus.Infof("Verifying cluster deployments (%s)", tt.cluster.Name)
+			err = deployment.VerifyClusterDeployments(d.client, tt.cluster)
 			require.NoError(d.T(), err)
 
-			logrus.Infof("Verifying cluster pods (%s)", cluster.Name)
-			pods.VerifyClusterPods(d.client, cluster)
+			logrus.Infof("Verifying cluster pods (%s)", tt.cluster.Name)
+			pods.VerifyClusterPods(d.client, tt.cluster)
 		})
 
 		params := provisioning.GetProvisioningSchemaParams(d.client, d.cattleConfig)
diff --git a/validation/deleting/k3s/dualstack/schemas/hostbusters_schemas.yaml b/validation/deleting/k3s/dualstack/schemas/hostbusters_schemas.yaml
new file mode 100644
index 000000000..685202ed2
--- /dev/null
+++ b/validation/deleting/k3s/dualstack/schemas/hostbusters_schemas.yaml
@@ -0,0 +1,43 @@
+- suite: Go Automation/Deleting/k3s/dualstack
+  projects: [RRT, RM]
+  cases:
+    - description: Deletes an existing cluster
+      title: K3S_Delete_Dualstack_Cluster
+      priority: 4
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Delete the existing cluster
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Verify cluster is deleted
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
+    - description: Deletes the init machine on an existing cluster
+      title: K3S_Dualstack_Delete_Init_Machine
+      priority: 4
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Delete the init machine on an existing cluster
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Verify cluster state
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
\ No newline at end of file

diff --git a/validation/deleting/k3s/ipv6/defaults/defaults.yaml b/validation/deleting/k3s/ipv6/defaults/defaults.yaml
new file mode 100644
index 000000000..97d90faee
--- /dev/null
+++ b/validation/deleting/k3s/ipv6/defaults/defaults.yaml
@@ -0,0 +1,16 @@
+#Required for all dualstack/ipv6 tests
+clusterConfig:
+  networking:
+    clusterCIDR: ""
+    serviceCIDR: ""
+    stackPreference: ""
+  advanced:
+    machineGlobalConfig:
+      flannel-ipv6-masq: true
+
+awsMachineConfigs:
+  awsMachineConfig:
+    - enablePrimaryIPv6: true
+      httpProtocolIpv6: "enabled"
+      ipv6AddressOnly: true
+      ipv6AddressCount: "1"
\ No newline at end of file

diff --git a/validation/deleting/ipv6/delete_cluster_test.go b/validation/deleting/k3s/ipv6/delete_cluster_test.go
similarity index 62%
rename from validation/deleting/ipv6/delete_cluster_test.go
rename to validation/deleting/k3s/ipv6/delete_cluster_test.go
index 9e28457fd..8dc526752 100644
--- a/validation/deleting/ipv6/delete_cluster_test.go
+++ b/validation/deleting/k3s/ipv6/delete_cluster_test.go
@@ -6,10 +6,10 @@ import (
 	"os"
 	"testing"
 
-	rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1"
 	"github.com/rancher/shepherd/clients/rancher"
 	v1 "github.com/rancher/shepherd/clients/rancher/v1"
 	extClusters "github.com/rancher/shepherd/extensions/clusters"
+	"github.com/rancher/shepherd/extensions/defaults/stevetypes"
 	"github.com/rancher/shepherd/pkg/config"
 	"github.com/rancher/shepherd/pkg/config/operations"
 	"github.com/rancher/shepherd/pkg/session"
@@ -17,7 +17,6 @@ import (
 	"github.com/rancher/tests/actions/config/defaults"
 	"github.com/rancher/tests/actions/logging"
 	"github.com/rancher/tests/actions/provisioning"
-	"github.com/rancher/tests/actions/provisioninginput"
 	"github.com/rancher/tests/actions/qase"
 	resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster"
 	standard "github.com/rancher/tests/validation/provisioning/resources/standarduser"
@@ -31,8 +30,7 @@ type DeleteIPv6ClusterTestSuite struct {
 	session      *session.Session
 	client       *rancher.Client
 	cattleConfig map[string]any
-	rke2Cluster  *v1.SteveAPIObject
-	k3sCluster   *v1.SteveAPIObject
+	cluster      *v1.SteveAPIObject
 }
 
 func (d *DeleteIPv6ClusterTestSuite) TearDownSuite() {
@@ -65,46 +63,38 @@ func (d *DeleteIPv6ClusterTestSuite) SetupSuite() {
 	clusterConfig := new(clusters.ClusterConfig)
 	operations.LoadObjectFromMap(defaults.ClusterConfigKey, d.cattleConfig, clusterConfig)
 
-	provider := provisioning.CreateProvider(clusterConfig.Provider)
-	machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig)
+	rancherConfig := new(rancher.Config)
+	operations.LoadObjectFromMap(defaults.RancherConfigKey, d.cattleConfig, rancherConfig)
 
-	logrus.Info("Provisioning RKE2 cluster")
-	d.rke2Cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false)
-	require.NoError(d.T(), err)
-
-	if clusterConfig.Advanced == nil {
-		clusterConfig.Advanced = &provisioninginput.Advanced{}
-	}
+	if rancherConfig.ClusterName == "" {
+		provider := provisioning.CreateProvider(clusterConfig.Provider)
+		machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig)
 
-	if clusterConfig.Advanced.MachineGlobalConfig == nil {
-		clusterConfig.Advanced.MachineGlobalConfig = &rkev1.GenericMap{
-			Data: map[string]any{},
-		}
+		logrus.Info("Provisioning K3s cluster")
+		d.cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, defaults.K3S, provider, *clusterConfig, machineConfigSpec, nil, true, false)
+		require.NoError(d.T(), err)
+	} else {
+		logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName)
+		d.cluster, err = d.client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + rancherConfig.ClusterName)
+		require.NoError(d.T(), err)
 	}
-
-	clusterConfig.Advanced.MachineGlobalConfig.Data["flannel-ipv6-masq"] = true
-
-	logrus.Info("Provisioning K3s cluster")
-	d.k3sCluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false)
-	require.NoError(d.T(), err)
 }
 
 func (d *DeleteIPv6ClusterTestSuite) TestDeletingIPv6Cluster() {
 	tests := []struct {
-		name      string
-		clusterID string
+		name    string
+		cluster *v1.SteveAPIObject
 	}{
-		{"RKE2_Delete_IPv6_Cluster", d.rke2Cluster.ID},
-		{"K3S_Delete_IPv6_Cluster", d.k3sCluster.ID},
+		{"K3S_Delete_IPv6_Cluster", d.cluster},
 	}
 
 	for _, tt := range tests {
 		d.Run(tt.name, func() {
-			logrus.Infof("Deleting cluster (%s)", tt.clusterID)
-			extClusters.DeleteK3SRKE2Cluster(d.client, tt.clusterID)
+			logrus.Infof("Deleting cluster (%s)", tt.cluster.ID)
+			extClusters.DeleteK3SRKE2Cluster(d.client, tt.cluster.ID)
 
-			logrus.Infof("Verifying cluster (%s) deletion", tt.clusterID)
-			provisioning.VerifyDeleteRKE2K3SCluster(d.T(), d.client, tt.clusterID)
+			logrus.Infof("Verifying cluster (%s) deletion", tt.cluster.ID)
+			provisioning.VerifyDeleteRKE2K3SCluster(d.T(), d.client, tt.cluster.ID)
 		})
 
 		params := provisioning.GetProvisioningSchemaParams(d.client, d.cattleConfig)
logrus.Infof("Deleting cluster (%s)", tt.clusterID) - extClusters.DeleteK3SRKE2Cluster(d.client, tt.clusterID) + logrus.Infof("Deleting cluster (%s)", tt.cluster.ID) + extClusters.DeleteK3SRKE2Cluster(d.client, tt.cluster.ID) - logrus.Infof("Verifying cluster (%s) deletion", tt.clusterID) - provisioning.VerifyDeleteRKE2K3SCluster(d.T(), d.client, tt.clusterID) + logrus.Infof("Verifying cluster (%s) deletion", tt.cluster.ID) + provisioning.VerifyDeleteRKE2K3SCluster(d.T(), d.client, tt.cluster.ID) }) params := provisioning.GetProvisioningSchemaParams(d.client, d.cattleConfig) diff --git a/validation/deleting/ipv6/delete_init_machine_test.go b/validation/deleting/k3s/ipv6/delete_init_machine_test.go similarity index 54% rename from validation/deleting/ipv6/delete_init_machine_test.go rename to validation/deleting/k3s/ipv6/delete_init_machine_test.go index c3b0ffabf..c846bb9c6 100644 --- a/validation/deleting/ipv6/delete_init_machine_test.go +++ b/validation/deleting/k3s/ipv6/delete_init_machine_test.go @@ -6,10 +6,8 @@ import ( "os" "testing" - rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" v1 "github.com/rancher/shepherd/clients/rancher/v1" - extClusters "github.com/rancher/shepherd/extensions/clusters" "github.com/rancher/shepherd/extensions/defaults/stevetypes" "github.com/rancher/shepherd/pkg/config" "github.com/rancher/shepherd/pkg/config/operations" @@ -22,7 +20,6 @@ import ( "github.com/rancher/tests/actions/qase" "github.com/rancher/tests/actions/workloads/deployment" "github.com/rancher/tests/actions/workloads/pods" - "github.com/rancher/tests/validation/deleting/rke2k3s" resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster" standard "github.com/rancher/tests/validation/provisioning/resources/standarduser" "github.com/sirupsen/logrus" @@ -35,8 +32,7 @@ type DeleteInitMachineIPv6TestSuite struct { client *rancher.Client session *session.Session cattleConfig map[string]any - rke2Cluster *v1.SteveAPIObject - k3sCluster *v1.SteveAPIObject + cluster *v1.SteveAPIObject } func (d *DeleteInitMachineIPv6TestSuite) TearDownSuite() { @@ -69,64 +65,54 @@ func (d *DeleteInitMachineIPv6TestSuite) SetupSuite() { clusterConfig := new(clusters.ClusterConfig) operations.LoadObjectFromMap(defaults.ClusterConfigKey, d.cattleConfig, clusterConfig) - nodeRolesStandard := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool} + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, d.cattleConfig, rancherConfig) - nodeRolesStandard[0].MachinePoolConfig.Quantity = 3 - nodeRolesStandard[1].MachinePoolConfig.Quantity = 2 - nodeRolesStandard[2].MachinePoolConfig.Quantity = 3 - clusterConfig.MachinePools = nodeRolesStandard + if rancherConfig.ClusterName == "" { + nodeRolesStandard := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool} - provider := provisioning.CreateProvider(clusterConfig.Provider) - machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig) + nodeRolesStandard[0].MachinePoolConfig.Quantity = 3 + nodeRolesStandard[1].MachinePoolConfig.Quantity = 2 + nodeRolesStandard[2].MachinePoolConfig.Quantity = 3 + clusterConfig.MachinePools = nodeRolesStandard - logrus.Info("Provisioning RKE2 cluster") - d.rke2Cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), 
standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false) - require.NoError(d.T(), err) - - if clusterConfig.Advanced == nil { - clusterConfig.Advanced = &provisioninginput.Advanced{} - } + provider := provisioning.CreateProvider(clusterConfig.Provider) + machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig) - if clusterConfig.Advanced.MachineGlobalConfig == nil { - clusterConfig.Advanced.MachineGlobalConfig = &rkev1.GenericMap{ - Data: map[string]any{}, - } + logrus.Info("Provisioning K3s cluster") + d.cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, defaults.K3S, provider, *clusterConfig, machineConfigSpec, nil, true, false) + require.NoError(d.T(), err) + } else { + logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName) + d.cluster, err = d.client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + rancherConfig.ClusterName) + require.NoError(d.T(), err) } - - clusterConfig.Advanced.MachineGlobalConfig.Data["flannel-ipv6-masq"] = true - - logrus.Info("Provisioning K3s cluster") - d.k3sCluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false) - require.NoError(d.T(), err) } func (d *DeleteInitMachineIPv6TestSuite) TestDeleteInitMachineIPv6() { tests := []struct { - name string - clusterID string + name string + cluster *v1.SteveAPIObject }{ - {"RKE2_IPv6_Delete_Init_Machine", d.rke2Cluster.ID}, - {"K3S_IPv6_Delete_Init_Machine", d.k3sCluster.ID}, + {"K3S_IPv6_Delete_Init_Machine", d.cluster}, } for _, tt := range tests { - cluster, err := d.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.clusterID) - require.NoError(d.T(), err) - + var err error d.Run(tt.name, func() { - logrus.Infof("Deleting init machine on cluster (%s)", cluster.Name) - err := rke2k3s.DeleteInitMachine(d.client, tt.clusterID) + logrus.Infof("Deleting init machine on cluster (%s)", tt.cluster.Name) + err := clusters.DeleteInitMachine(d.client, tt.cluster.ID) require.NoError(d.T(), err) - logrus.Infof("Verifying the cluster is ready (%s)", cluster.Name) - provisioning.VerifyClusterReady(d.T(), d.client, cluster) + logrus.Infof("Verifying the cluster is ready (%s)", tt.cluster.Name) + provisioning.VerifyClusterReady(d.T(), d.client, tt.cluster) - logrus.Infof("Verifying cluster deployments (%s)", cluster.Name) - err = deployment.VerifyClusterDeployments(d.client, cluster) + logrus.Infof("Verifying cluster deployments (%s)", tt.cluster.Name) + err = deployment.VerifyClusterDeployments(d.client, tt.cluster) require.NoError(d.T(), err) - logrus.Infof("Verifying cluster pods (%s)", cluster.Name) - err = pods.VerifyClusterPods(d.client, cluster) + logrus.Infof("Verifying cluster pods (%s)", tt.cluster.Name) + err = pods.VerifyClusterPods(d.client, tt.cluster) require.NoError(d.T(), err) }) diff --git a/validation/deleting/k3s/ipv6/schemas/hostbusters_schemas.yaml b/validation/deleting/k3s/ipv6/schemas/hostbusters_schemas.yaml new file mode 100644 index 000000000..32754b018 --- /dev/null +++ b/validation/deleting/k3s/ipv6/schemas/hostbusters_schemas.yaml @@ -0,0 +1,43 @@ +- suite: Go Automation/Deleting/k3s/ipv6 + projects: [RRT, RM] + cases: + - description: Deletes an existing cluster + title: K3S_Delete_IPv6_Cluster + priority: 4 + type: 8 + is_flaky: 0 + automation: 2 + steps: + - action: Delete the existing cluster + expectedresult: "" + data: "" + position: 1 + 
diff --git a/validation/deleting/k3s/ipv6/schemas/hostbusters_schemas.yaml b/validation/deleting/k3s/ipv6/schemas/hostbusters_schemas.yaml
new file mode 100644
index 000000000..32754b018
--- /dev/null
+++ b/validation/deleting/k3s/ipv6/schemas/hostbusters_schemas.yaml
@@ -0,0 +1,43 @@
+- suite: Go Automation/Deleting/k3s/ipv6
+  projects: [RRT, RM]
+  cases:
+    - description: Deletes an existing cluster
+      title: K3S_Delete_IPv6_Cluster
+      priority: 4
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Delete the existing cluster
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Verify cluster is deleted
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
+    - description: Deletes the init machine on an existing cluster
+      title: K3S_IPv6_Delete_Init_Machine
+      priority: 4
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Delete the init machine on an existing cluster
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Verify cluster state
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
\ No newline at end of file

diff --git a/validation/deleting/k3s/schemas/hostbusters_schemas.yaml b/validation/deleting/k3s/schemas/hostbusters_schemas.yaml
new file mode 100644
index 000000000..2068d71f8
--- /dev/null
+++ b/validation/deleting/k3s/schemas/hostbusters_schemas.yaml
@@ -0,0 +1,43 @@
+- suite: Go Automation/Deleting/k3s
+  projects: [RRT, RM]
+  cases:
+    - description: Deletes an existing cluster
+      title: K3S_Delete_Cluster
+      priority: 4
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Delete the existing cluster
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Verify cluster is deleted
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
+    - description: Deletes the init machine on an existing cluster
+      title: K3S_Delete_Init_Machine
+      priority: 4
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Delete the init machine on an existing cluster
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Verify cluster state
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
\ No newline at end of file

diff --git a/validation/deleting/rke2/README.md b/validation/deleting/rke2/README.md
new file mode 100644
index 000000000..8e750459f
--- /dev/null
+++ b/validation/deleting/rke2/README.md
@@ -0,0 +1,80 @@
+# RKE2 Deleting Configs
+
+## Table of Contents
+1. [Prerequisites](../README.md)
+2. [Test Cases](#Test-Cases)
+3. [Configurations](#Configurations)
+4. [Configuration Defaults](#defaults)
+5. [Logging Levels](#Logging)
+6. [Back to general deleting](../README.md)
+
+## Test Cases
+All of the test cases in this package are listed below. Keep in mind that all configuration for these tests has built-in defaults ([Configuration Defaults](#defaults)). These tests will provision a cluster if one is not provided via the rancher.ClusterName field.
+
+### Delete cluster test
+
+#### Description:
+Verifies that a cluster can be deleted.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `RKE2_Delete_Cluster`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/deleting/rke2 --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestDeleteClusterTestSuite/TestDeletingCluster -timeout=1h -v`
+
+
+### Delete cluster init machine test
+
+#### Description:
+Verifies that a cluster is able to recover from deleting the init machine.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `RKE2_Delete_Init_Machine`
+
+#### Run Commands:
+
+## Configurations
+
+### Existing cluster:
+```yaml
+rancher:
+  host:
+  adminToken:
+  clusterName: ""
+  cleanup: true
+  insecure: true
+```
+
+### Provisioning cluster
+These tests will create a cluster if one is not provided. To configure a node driver or custom cluster, depending on the deleting test, see [rke2 provisioning](../../provisioning/rke2/README.md).
+
+## Defaults
+This package contains a defaults folder with default test configuration data for non-sensitive fields. The goal of this data is to:
+1. Reduce the number of fields the user needs to provide in the cattle_config file.
+2. Reduce the amount of yaml data that needs to be stored in our pipelines.
+3. Make it easier to run tests.
+
+Any data the user provides overrides these defaults, which are stored here: [defaults](defaults/defaults.yaml).
+
+## Logging
+This package supports several logging levels. You can set the logging level via the cattle config; all logs at or above the provided level are emitted, while logs below that level are omitted.
+
+```yaml
+logging:
+  level: "trace" # trace, debug, info, warning, error
+```
+
+## Additional
+1. If the tests pass immediately without warning, try adding `-count=1` or running `go clean -cache`. This prevents cached results from interfering with the new test run.
+2. All of the tests utilize parallelism when running. For more fine-grained control over what runs in parallel, use the `-p` and `-parallel` flags.
\ No newline at end of file
diff --git a/validation/deleting/rke2/defaults/defaults.yaml b/validation/deleting/rke2/defaults/defaults.yaml
new file mode 100644
index 000000000..a32fec54a
--- /dev/null
+++ b/validation/deleting/rke2/defaults/defaults.yaml
@@ -0,0 +1,49 @@
+#Required for all tests
+rancher:
+  host: ""
+  adminToken: ""
+  insecure: true
+  cleanup: true
+
+#Required for all tests except template
+clusterConfig:
+  machinePools:
+  - machinePoolConfig:
+      etcd: true
+      controlplane: false
+      worker: false
+      quantity: 1
+  - machinePoolConfig:
+      etcd: false
+      controlplane: true
+      worker: false
+      quantity: 1
+  - machinePoolConfig:
+      etcd: false
+      controlplane: false
+      worker: true
+      quantity: 1
+  kubernetesVersion: ""
+  cni: "calico"
+  provider: "aws"
+  nodeProvider: "ec2"
+
+#Required for all tests
+awsCredentials:
+  secretKey: ""
+  accessKey: ""
+  defaultRegion: "us-east-2"
+
+#Required for tests that utilize node driver clusters
+awsMachineConfigs:
+  region: "us-east-2"
+  awsMachineConfig:
+  - roles: ["etcd","controlplane","worker"]
+    ami: ""
+    instanceType: "t3a.medium"
+    sshUser: ""
+    vpcId: ""
+    retries: "5"
+
+logging:
+  level: "info"
\ No newline at end of file
diff --git a/validation/deleting/rke2/delete_cluster_test.go b/validation/deleting/rke2/delete_cluster_test.go
new file mode 100644
index 000000000..fa2aaad46
--- /dev/null
+++ b/validation/deleting/rke2/delete_cluster_test.go
@@ -0,0 +1,110 @@
+//go:build (infra.rke2k3s || validation || recurring) && !infra.any && !infra.aks && !infra.eks && !infra.gke && !infra.rke1 && !stress && !sanity && !extended
+
+package rke2
+
+import (
+	"os"
+	"testing"
+
+	"github.com/rancher/shepherd/clients/rancher"
+	v1 "github.com/rancher/shepherd/clients/rancher/v1"
+	extClusters "github.com/rancher/shepherd/extensions/clusters"
+
"github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/pkg/config" + "github.com/rancher/shepherd/pkg/config/operations" + "github.com/rancher/shepherd/pkg/session" + "github.com/rancher/tests/actions/clusters" + "github.com/rancher/tests/actions/config/defaults" + "github.com/rancher/tests/actions/logging" + "github.com/rancher/tests/actions/provisioning" + "github.com/rancher/tests/actions/qase" + resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster" + standard "github.com/rancher/tests/validation/provisioning/resources/standarduser" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type DeleteClusterTestSuite struct { + suite.Suite + client *rancher.Client + session *session.Session + cattleConfig map[string]any + cluster *v1.SteveAPIObject +} + +func (d *DeleteClusterTestSuite) TearDownSuite() { + d.session.Cleanup() +} + +func (d *DeleteClusterTestSuite) SetupSuite() { + testSession := session.NewSession() + d.session = testSession + + client, err := rancher.NewClient("", d.session) + require.NoError(d.T(), err) + + d.client = client + + standardUserClient, _, _, err := standard.CreateStandardUser(d.client) + require.NoError(d.T(), err) + + d.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey)) + + d.cattleConfig, err = defaults.LoadPackageDefaults(d.cattleConfig, "") + require.NoError(d.T(), err) + + loggingConfig := new(logging.Logging) + operations.LoadObjectFromMap(logging.LoggingKey, d.cattleConfig, loggingConfig) + + err = logging.SetLogger(loggingConfig) + require.NoError(d.T(), err) + + clusterConfig := new(clusters.ClusterConfig) + operations.LoadObjectFromMap(defaults.ClusterConfigKey, d.cattleConfig, clusterConfig) + + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, d.cattleConfig, rancherConfig) + + if rancherConfig.ClusterName == "" { + provider := provisioning.CreateProvider(clusterConfig.Provider) + machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig) + + logrus.Info("Provisioning RKE2 cluster") + d.cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, defaults.RKE2, provider, *clusterConfig, machineConfigSpec, nil, true, false) + require.NoError(d.T(), err) + } else { + logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName) + d.cluster, err = d.client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + rancherConfig.ClusterName) + require.NoError(d.T(), err) + } +} + +func (d *DeleteClusterTestSuite) TestDeletingCluster() { + tests := []struct { + name string + cluster *v1.SteveAPIObject + }{ + {"RKE2_Delete_Cluster", d.cluster}, + } + + for _, tt := range tests { + d.Run(tt.name, func() { + logrus.Infof("Deleting cluster (%s)", tt.cluster.ID) + extClusters.DeleteK3SRKE2Cluster(d.client, tt.cluster.ID) + + logrus.Infof("Verifying cluster (%s) deletion", tt.cluster.ID) + provisioning.VerifyDeleteRKE2K3SCluster(d.T(), d.client, tt.cluster.ID) + }) + + params := provisioning.GetProvisioningSchemaParams(d.client, d.cattleConfig) + err := qase.UpdateSchemaParameters(tt.name, params) + if err != nil { + logrus.Warningf("Failed to upload schema parameters %s", err) + } + } +} + +func TestDeleteClusterTestSuite(t *testing.T) { + suite.Run(t, new(DeleteClusterTestSuite)) +} diff --git a/validation/deleting/rke2/delete_init_machine_test.go b/validation/deleting/rke2/delete_init_machine_test.go new file mode 
100644 index 000000000..3a1ab1329 --- /dev/null +++ b/validation/deleting/rke2/delete_init_machine_test.go @@ -0,0 +1,129 @@ +//go:build (infra.rke2k3s || validation || recurring) && !infra.any && !infra.aks && !infra.eks && !infra.gke && !infra.rke1 && !stress && !sanity && !extended + +package rke2 + +import ( + "os" + "testing" + + "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/pkg/config" + "github.com/rancher/shepherd/pkg/config/operations" + "github.com/rancher/shepherd/pkg/session" + "github.com/rancher/tests/actions/clusters" + "github.com/rancher/tests/actions/config/defaults" + "github.com/rancher/tests/actions/logging" + "github.com/rancher/tests/actions/provisioning" + "github.com/rancher/tests/actions/provisioninginput" + "github.com/rancher/tests/actions/qase" + "github.com/rancher/tests/actions/workloads/deployment" + "github.com/rancher/tests/actions/workloads/pods" + resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster" + standard "github.com/rancher/tests/validation/provisioning/resources/standarduser" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type DeleteInitMachineTestSuite struct { + suite.Suite + client *rancher.Client + session *session.Session + cattleConfig map[string]any + cluster *v1.SteveAPIObject +} + +func (d *DeleteInitMachineTestSuite) TearDownSuite() { + d.session.Cleanup() +} + +func (d *DeleteInitMachineTestSuite) SetupSuite() { + testSession := session.NewSession() + d.session = testSession + + client, err := rancher.NewClient("", testSession) + require.NoError(d.T(), err) + + d.client = client + + standardUserClient, _, _, err := standard.CreateStandardUser(d.client) + require.NoError(d.T(), err) + + d.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey)) + + d.cattleConfig, err = defaults.LoadPackageDefaults(d.cattleConfig, "") + require.NoError(d.T(), err) + + loggingConfig := new(logging.Logging) + operations.LoadObjectFromMap(logging.LoggingKey, d.cattleConfig, loggingConfig) + + err = logging.SetLogger(loggingConfig) + require.NoError(d.T(), err) + + clusterConfig := new(clusters.ClusterConfig) + operations.LoadObjectFromMap(defaults.ClusterConfigKey, d.cattleConfig, clusterConfig) + + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, d.cattleConfig, rancherConfig) + + if rancherConfig.ClusterName == "" { + nodeRolesStandard := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool} + + nodeRolesStandard[0].MachinePoolConfig.Quantity = 3 + nodeRolesStandard[1].MachinePoolConfig.Quantity = 2 + nodeRolesStandard[2].MachinePoolConfig.Quantity = 3 + clusterConfig.MachinePools = nodeRolesStandard + + provider := provisioning.CreateProvider(clusterConfig.Provider) + machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig) + + logrus.Info("Provisioning RKE2 cluster") + d.cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, defaults.RKE2, provider, *clusterConfig, machineConfigSpec, nil, true, false) + require.NoError(d.T(), err) + } else { + logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName) + d.cluster, err = d.client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + 
rancherConfig.ClusterName) + require.NoError(d.T(), err) + } +} + +func (d *DeleteInitMachineTestSuite) TestDeleteInitMachine() { + tests := []struct { + name string + cluster *v1.SteveAPIObject + }{ + {"RKE2_Delete_Init_Machine", d.cluster}, + } + + for _, tt := range tests { + var err error + d.Run(tt.name, func() { + logrus.Infof("Deleting init machine on cluster (%s)", tt.cluster.Name) + err := clusters.DeleteInitMachine(d.client, tt.cluster.ID) + require.NoError(d.T(), err) + + logrus.Infof("Verifying the cluster is ready (%s)", tt.cluster.Name) + provisioning.VerifyClusterReady(d.T(), d.client, tt.cluster) + + logrus.Infof("Verifying cluster deployments (%s)", tt.cluster.Name) + err = deployment.VerifyClusterDeployments(d.client, tt.cluster) + require.NoError(d.T(), err) + + logrus.Infof("Verifying cluster pods (%s)", tt.cluster.Name) + err = pods.VerifyClusterPods(d.client, tt.cluster) + require.NoError(d.T(), err) + }) + + params := provisioning.GetProvisioningSchemaParams(d.client, d.cattleConfig) + err = qase.UpdateSchemaParameters(tt.name, params) + if err != nil { + logrus.Warningf("Failed to upload schema parameters %s", err) + } + } +} + +func TestDeleteInitMachineTestSuite(t *testing.T) { + suite.Run(t, new(DeleteInitMachineTestSuite)) +} diff --git a/validation/deleting/dualstack/defaults/defaults.yaml b/validation/deleting/rke2/dualstack/defaults/defaults.yaml similarity index 100% rename from validation/deleting/dualstack/defaults/defaults.yaml rename to validation/deleting/rke2/dualstack/defaults/defaults.yaml diff --git a/validation/deleting/rke2/dualstack/delete_cluster_test.go b/validation/deleting/rke2/dualstack/delete_cluster_test.go new file mode 100644 index 000000000..bcead4631 --- /dev/null +++ b/validation/deleting/rke2/dualstack/delete_cluster_test.go @@ -0,0 +1,110 @@ +//go:build validation || recurring + +package dualstack + +import ( + "os" + "testing" + + "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" + extClusters "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/pkg/config" + "github.com/rancher/shepherd/pkg/config/operations" + "github.com/rancher/shepherd/pkg/session" + "github.com/rancher/tests/actions/clusters" + "github.com/rancher/tests/actions/config/defaults" + "github.com/rancher/tests/actions/logging" + "github.com/rancher/tests/actions/provisioning" + "github.com/rancher/tests/actions/qase" + resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster" + standard "github.com/rancher/tests/validation/provisioning/resources/standarduser" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type DeleteDualstackClusterTestSuite struct { + suite.Suite + session *session.Session + client *rancher.Client + cattleConfig map[string]any + cluster *v1.SteveAPIObject +} + +func (d *DeleteDualstackClusterTestSuite) TearDownSuite() { + d.session.Cleanup() +} + +func (d *DeleteDualstackClusterTestSuite) SetupSuite() { + testSession := session.NewSession() + d.session = testSession + + client, err := rancher.NewClient("", testSession) + require.NoError(d.T(), err) + + d.client = client + + standardUserClient, _, _, err := standard.CreateStandardUser(d.client) + require.NoError(d.T(), err) + + d.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey)) + + d.cattleConfig, err = 
defaults.LoadPackageDefaults(d.cattleConfig, "") + require.NoError(d.T(), err) + + loggingConfig := new(logging.Logging) + operations.LoadObjectFromMap(logging.LoggingKey, d.cattleConfig, loggingConfig) + + err = logging.SetLogger(loggingConfig) + require.NoError(d.T(), err) + + clusterConfig := new(clusters.ClusterConfig) + operations.LoadObjectFromMap(defaults.ClusterConfigKey, d.cattleConfig, clusterConfig) + + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, d.cattleConfig, rancherConfig) + + if rancherConfig.ClusterName == "" { + provider := provisioning.CreateProvider(clusterConfig.Provider) + machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig) + + logrus.Info("Provisioning RKE2 cluster") + d.cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, defaults.RKE2, provider, *clusterConfig, machineConfigSpec, nil, true, false) + require.NoError(d.T(), err) + } else { + logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName) + d.cluster, err = d.client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + rancherConfig.ClusterName) + require.NoError(d.T(), err) + } +} + +func (d *DeleteDualstackClusterTestSuite) TestDeletingDualstackCluster() { + tests := []struct { + name string + cluster *v1.SteveAPIObject + }{ + {"RKE2_Delete_Dualstack_Cluster", d.cluster}, + } + + for _, tt := range tests { + d.Run(tt.name, func() { + logrus.Infof("Deleting cluster (%s)", tt.cluster.Name) + extClusters.DeleteK3SRKE2Cluster(d.client, tt.cluster.ID) + + logrus.Infof("Verifying cluster (%s) deletion", tt.cluster.ID) + provisioning.VerifyDeleteRKE2K3SCluster(d.T(), d.client, tt.cluster.ID) + }) + + params := provisioning.GetProvisioningSchemaParams(d.client, d.cattleConfig) + err := qase.UpdateSchemaParameters(tt.name, params) + if err != nil { + logrus.Warningf("Failed to upload schema parameters %s", err) + } + } +} + +func TestDeleteDualstackClusterTestSuite(t *testing.T) { + suite.Run(t, new(DeleteDualstackClusterTestSuite)) +} diff --git a/validation/deleting/rke2/dualstack/delete_init_machine_test.go b/validation/deleting/rke2/dualstack/delete_init_machine_test.go new file mode 100644 index 000000000..513747ae8 --- /dev/null +++ b/validation/deleting/rke2/dualstack/delete_init_machine_test.go @@ -0,0 +1,128 @@ +//go:build validation || recurring + +package dualstack + +import ( + "os" + "testing" + + "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/pkg/config" + "github.com/rancher/shepherd/pkg/config/operations" + "github.com/rancher/shepherd/pkg/session" + "github.com/rancher/tests/actions/clusters" + "github.com/rancher/tests/actions/config/defaults" + "github.com/rancher/tests/actions/logging" + "github.com/rancher/tests/actions/provisioning" + "github.com/rancher/tests/actions/provisioninginput" + "github.com/rancher/tests/actions/qase" + "github.com/rancher/tests/actions/workloads/deployment" + "github.com/rancher/tests/actions/workloads/pods" + resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster" + standard "github.com/rancher/tests/validation/provisioning/resources/standarduser" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type DeleteInitMachineDualstackTestSuite struct { + suite.Suite + client *rancher.Client + session *session.Session + 
cattleConfig map[string]any
+	cluster      *v1.SteveAPIObject
+}
+
+func (d *DeleteInitMachineDualstackTestSuite) TearDownSuite() {
+	d.session.Cleanup()
+}
+
+func (d *DeleteInitMachineDualstackTestSuite) SetupSuite() {
+	testSession := session.NewSession()
+	d.session = testSession
+
+	client, err := rancher.NewClient("", testSession)
+	require.NoError(d.T(), err)
+
+	d.client = client
+
+	standardUserClient, _, _, err := standard.CreateStandardUser(d.client)
+	require.NoError(d.T(), err)
+
+	d.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey))
+
+	d.cattleConfig, err = defaults.LoadPackageDefaults(d.cattleConfig, "")
+	require.NoError(d.T(), err)
+
+	loggingConfig := new(logging.Logging)
+	operations.LoadObjectFromMap(logging.LoggingKey, d.cattleConfig, loggingConfig)
+
+	err = logging.SetLogger(loggingConfig)
+	require.NoError(d.T(), err)
+
+	clusterConfig := new(clusters.ClusterConfig)
+	operations.LoadObjectFromMap(defaults.ClusterConfigKey, d.cattleConfig, clusterConfig)
+
+	rancherConfig := new(rancher.Config)
+	operations.LoadObjectFromMap(defaults.RancherConfigKey, d.cattleConfig, rancherConfig)
+
+	nodeRolesStandard := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool}
+
+	nodeRolesStandard[0].MachinePoolConfig.Quantity = 3
+	nodeRolesStandard[1].MachinePoolConfig.Quantity = 2
+	nodeRolesStandard[2].MachinePoolConfig.Quantity = 3
+	clusterConfig.MachinePools = nodeRolesStandard
+
+	if rancherConfig.ClusterName == "" {
+		provider := provisioning.CreateProvider(clusterConfig.Provider)
+		machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig)
+
+		logrus.Info("Provisioning RKE2 cluster")
+		d.cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, defaults.RKE2, provider, *clusterConfig, machineConfigSpec, nil, true, false)
+		require.NoError(d.T(), err)
+	} else {
+		logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName)
+		d.cluster, err = d.client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + rancherConfig.ClusterName)
+		require.NoError(d.T(), err)
+	}
+}
+
+func (d *DeleteInitMachineDualstackTestSuite) TestDeleteInitMachineDualstack() {
+	tests := []struct {
+		name    string
+		cluster *v1.SteveAPIObject
+	}{
+		{"RKE2_Dualstack_Delete_Init_Machine", d.cluster},
+	}
+
+	for _, tt := range tests {
+		var err error
+		d.Run(tt.name, func() {
+			logrus.Infof("Deleting init machine on cluster (%s)", tt.cluster.Name)
+			err := clusters.DeleteInitMachine(d.client, tt.cluster.ID)
+			require.NoError(d.T(), err)
+
+			logrus.Infof("Verifying the cluster is ready (%s)", tt.cluster.Name)
+			provisioning.VerifyClusterReady(d.T(), d.client, tt.cluster)
+
+			logrus.Infof("Verifying cluster deployments (%s)", tt.cluster.Name)
+			err = deployment.VerifyClusterDeployments(d.client, tt.cluster)
+			require.NoError(d.T(), err)
+
+			logrus.Infof("Verifying cluster pods (%s)", tt.cluster.Name)
+			require.NoError(d.T(), pods.VerifyClusterPods(d.client, tt.cluster))
+		})
+
+		params := provisioning.GetProvisioningSchemaParams(d.client, d.cattleConfig)
+		err = qase.UpdateSchemaParameters(tt.name, params)
+		if err != nil {
+			logrus.Warningf("Failed to upload schema parameters %s", err)
+		}
+	}
+}
+
+func TestDeleteInitMachineDualstackTestSuite(t *testing.T) {
+	suite.Run(t, new(DeleteInitMachineDualstackTestSuite))
+}
diff --git a/validation/deleting/rke2/dualstack/schemas/hostbusters_schemas.yaml b/validation/deleting/rke2/dualstack/schemas/hostbusters_schemas.yaml
new file mode 100644 index 000000000..5c87e4509 --- /dev/null +++ b/validation/deleting/rke2/dualstack/schemas/hostbusters_schemas.yaml @@ -0,0 +1,43 @@ +- suite: Go Automation/Deleting/rke2/dualstack + projects: [RRT, RM] + cases: + - description: Deletes an existing cluster + title: RKE2_Delete_Dualstack_Cluster + priority: 4 + type: 8 + is_flaky: 0 + automation: 2 + steps: + - action: Delete the existing cluster + expectedresult: "" + data: "" + position: 1 + attachments: [] + - action: Verify cluster is deleted + expectedresult: "" + data: "" + position: 2 + attachments: [] + custom_field: + "14": Validation + "18": Hostbusters + - description: Deletes the init machine on an existing cluster + title: RKE2_Dualstack_Delete_Init_Machine + priority: 4 + type: 8 + is_flaky: 0 + automation: 2 + steps: + - action: Delete the init machine on an existing cluster + expectedresult: "" + data: "" + position: 1 + attachments: [] + - action: Verify cluster state + expectedresult: "" + data: "" + position: 2 + attachments: [] + custom_field: + "14": Validation + "18": Hostbusters \ No newline at end of file diff --git a/validation/deleting/ipv6/defaults/defaults.yaml b/validation/deleting/rke2/ipv6/defaults/defaults.yaml similarity index 100% rename from validation/deleting/ipv6/defaults/defaults.yaml rename to validation/deleting/rke2/ipv6/defaults/defaults.yaml diff --git a/validation/deleting/rke2/ipv6/delete_cluster_test.go b/validation/deleting/rke2/ipv6/delete_cluster_test.go new file mode 100644 index 000000000..8fadf91e6 --- /dev/null +++ b/validation/deleting/rke2/ipv6/delete_cluster_test.go @@ -0,0 +1,110 @@ +//go:build validation || recurring + +package ipv6 + +import ( + "os" + "testing" + + "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" + extClusters "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/pkg/config" + "github.com/rancher/shepherd/pkg/config/operations" + "github.com/rancher/shepherd/pkg/session" + "github.com/rancher/tests/actions/clusters" + "github.com/rancher/tests/actions/config/defaults" + "github.com/rancher/tests/actions/logging" + "github.com/rancher/tests/actions/provisioning" + "github.com/rancher/tests/actions/qase" + resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster" + standard "github.com/rancher/tests/validation/provisioning/resources/standarduser" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type DeleteIPv6ClusterTestSuite struct { + suite.Suite + session *session.Session + client *rancher.Client + cattleConfig map[string]any + cluster *v1.SteveAPIObject +} + +func (d *DeleteIPv6ClusterTestSuite) TearDownSuite() { + d.session.Cleanup() +} + +func (d *DeleteIPv6ClusterTestSuite) SetupSuite() { + testSession := session.NewSession() + d.session = testSession + + client, err := rancher.NewClient("", testSession) + require.NoError(d.T(), err) + + d.client = client + + standardUserClient, _, _, err := standard.CreateStandardUser(d.client) + require.NoError(d.T(), err) + + d.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey)) + + d.cattleConfig, err = defaults.LoadPackageDefaults(d.cattleConfig, "") + require.NoError(d.T(), err) + + loggingConfig := new(logging.Logging) + operations.LoadObjectFromMap(logging.LoggingKey, d.cattleConfig, loggingConfig) + + err = 
logging.SetLogger(loggingConfig) + require.NoError(d.T(), err) + + clusterConfig := new(clusters.ClusterConfig) + operations.LoadObjectFromMap(defaults.ClusterConfigKey, d.cattleConfig, clusterConfig) + + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, d.cattleConfig, rancherConfig) + + if rancherConfig.ClusterName == "" { + provider := provisioning.CreateProvider(clusterConfig.Provider) + machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig) + + logrus.Info("Provisioning RKE2 cluster") + d.cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, defaults.RKE2, provider, *clusterConfig, machineConfigSpec, nil, true, false) + require.NoError(d.T(), err) + } else { + logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName) + d.cluster, err = d.client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + rancherConfig.ClusterName) + require.NoError(d.T(), err) + } +} + +func (d *DeleteIPv6ClusterTestSuite) TestDeletingIPv6Cluster() { + tests := []struct { + name string + cluster *v1.SteveAPIObject + }{ + {"RKE2_Delete_IPv6_Cluster", d.cluster}, + } + + for _, tt := range tests { + d.Run(tt.name, func() { + logrus.Infof("Deleting cluster (%s)", tt.cluster.ID) + extClusters.DeleteK3SRKE2Cluster(d.client, tt.cluster.ID) + + logrus.Infof("Verifying cluster (%s) deletion", tt.cluster.ID) + provisioning.VerifyDeleteRKE2K3SCluster(d.T(), d.client, tt.cluster.ID) + }) + + params := provisioning.GetProvisioningSchemaParams(d.client, d.cattleConfig) + err := qase.UpdateSchemaParameters(tt.name, params) + if err != nil { + logrus.Warningf("Failed to upload schema parameters %s", err) + } + } +} + +func TestDeleteIPv6ClusterTestSuite(t *testing.T) { + suite.Run(t, new(DeleteIPv6ClusterTestSuite)) +} diff --git a/validation/deleting/rke2/ipv6/delete_init_machine_test.go b/validation/deleting/rke2/ipv6/delete_init_machine_test.go new file mode 100644 index 000000000..b453b65c0 --- /dev/null +++ b/validation/deleting/rke2/ipv6/delete_init_machine_test.go @@ -0,0 +1,129 @@ +//go:build validation || recurring + +package ipv6 + +import ( + "os" + "testing" + + "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/pkg/config" + "github.com/rancher/shepherd/pkg/config/operations" + "github.com/rancher/shepherd/pkg/session" + "github.com/rancher/tests/actions/clusters" + "github.com/rancher/tests/actions/config/defaults" + "github.com/rancher/tests/actions/logging" + "github.com/rancher/tests/actions/provisioning" + "github.com/rancher/tests/actions/provisioninginput" + "github.com/rancher/tests/actions/qase" + "github.com/rancher/tests/actions/workloads/deployment" + "github.com/rancher/tests/actions/workloads/pods" + resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster" + standard "github.com/rancher/tests/validation/provisioning/resources/standarduser" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type DeleteInitMachineIPv6TestSuite struct { + suite.Suite + client *rancher.Client + session *session.Session + cattleConfig map[string]any + cluster *v1.SteveAPIObject +} + +func (d *DeleteInitMachineIPv6TestSuite) TearDownSuite() { + d.session.Cleanup() +} + +func (d *DeleteInitMachineIPv6TestSuite) SetupSuite() { + testSession := session.NewSession() + d.session = 
testSession + + client, err := rancher.NewClient("", testSession) + require.NoError(d.T(), err) + + d.client = client + + standardUserClient, _, _, err := standard.CreateStandardUser(d.client) + require.NoError(d.T(), err) + + d.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey)) + + d.cattleConfig, err = defaults.LoadPackageDefaults(d.cattleConfig, "") + require.NoError(d.T(), err) + + loggingConfig := new(logging.Logging) + operations.LoadObjectFromMap(logging.LoggingKey, d.cattleConfig, loggingConfig) + + err = logging.SetLogger(loggingConfig) + require.NoError(d.T(), err) + + clusterConfig := new(clusters.ClusterConfig) + operations.LoadObjectFromMap(defaults.ClusterConfigKey, d.cattleConfig, clusterConfig) + + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, d.cattleConfig, rancherConfig) + + if rancherConfig.ClusterName == "" { + nodeRolesStandard := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool} + + nodeRolesStandard[0].MachinePoolConfig.Quantity = 3 + nodeRolesStandard[1].MachinePoolConfig.Quantity = 2 + nodeRolesStandard[2].MachinePoolConfig.Quantity = 3 + clusterConfig.MachinePools = nodeRolesStandard + + provider := provisioning.CreateProvider(clusterConfig.Provider) + machineConfigSpec := provider.LoadMachineConfigFunc(d.cattleConfig) + + logrus.Info("Provisioning RKE2 cluster") + d.cluster, err = resources.ProvisionRKE2K3SCluster(d.T(), standardUserClient, defaults.RKE2, provider, *clusterConfig, machineConfigSpec, nil, true, false) + require.NoError(d.T(), err) + } else { + logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName) + d.cluster, err = d.client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + rancherConfig.ClusterName) + require.NoError(d.T(), err) + } +} + +func (d *DeleteInitMachineIPv6TestSuite) TestDeleteInitMachineIPv6() { + tests := []struct { + name string + cluster *v1.SteveAPIObject + }{ + {"RKE2_IPv6_Delete_Init_Machine", d.cluster}, + } + + for _, tt := range tests { + var err error + d.Run(tt.name, func() { + logrus.Infof("Deleting init machine on cluster (%s)", tt.cluster.Name) + err := clusters.DeleteInitMachine(d.client, tt.cluster.ID) + require.NoError(d.T(), err) + + logrus.Infof("Verifying the cluster is ready (%s)", tt.cluster.Name) + provisioning.VerifyClusterReady(d.T(), d.client, tt.cluster) + + logrus.Infof("Verifying cluster deployments (%s)", tt.cluster.Name) + err = deployment.VerifyClusterDeployments(d.client, tt.cluster) + require.NoError(d.T(), err) + + logrus.Infof("Verifying cluster pods (%s)", tt.cluster.Name) + err = pods.VerifyClusterPods(d.client, tt.cluster) + require.NoError(d.T(), err) + }) + + params := provisioning.GetProvisioningSchemaParams(d.client, d.cattleConfig) + err = qase.UpdateSchemaParameters(tt.name, params) + if err != nil { + logrus.Warningf("Failed to upload schema parameters %s", err) + } + } +} + +func TestDeleteInitMachineIPv6TestSuite(t *testing.T) { + suite.Run(t, new(DeleteInitMachineIPv6TestSuite)) +} diff --git a/validation/deleting/rke2/ipv6/schemas/hostbusters_schemas.yaml b/validation/deleting/rke2/ipv6/schemas/hostbusters_schemas.yaml new file mode 100644 index 000000000..9ce8300af --- /dev/null +++ b/validation/deleting/rke2/ipv6/schemas/hostbusters_schemas.yaml @@ -0,0 +1,43 @@ +- suite: Go Automation/Deleting/rke2/ipv6 + projects: [RRT, RM] + cases: + - description: Deletes an existing cluster 
+ title: RKE2_Delete_IPv6_Cluster + priority: 4 + type: 8 + is_flaky: 0 + automation: 2 + steps: + - action: Delete the existing cluster + expectedresult: "" + data: "" + position: 1 + attachments: [] + - action: Verify cluster is deleted + expectedresult: "" + data: "" + position: 2 + attachments: [] + custom_field: + "14": Validation + "18": Hostbusters + - description: Deletes the init machine on an existing cluster + title: RKE2_IPv6_Delete_Init_Machine + priority: 4 + type: 8 + is_flaky: 0 + automation: 2 + steps: + - action: Delete the init machine on an existing cluster + expectedresult: "" + data: "" + position: 1 + attachments: [] + - action: Verify cluster state + expectedresult: "" + data: "" + position: 2 + attachments: [] + custom_field: + "14": Validation + "18": Hostbusters \ No newline at end of file diff --git a/validation/deleting/rke2/schemas/hostbusters_schemas.yaml b/validation/deleting/rke2/schemas/hostbusters_schemas.yaml new file mode 100644 index 000000000..50eb457d1 --- /dev/null +++ b/validation/deleting/rke2/schemas/hostbusters_schemas.yaml @@ -0,0 +1,43 @@ +- suite: Go Automation/Deleting/rke2 + projects: [RRT, RM] + cases: + - description: Deletes an existing cluster + title: RKE2_Delete_Cluster + priority: 4 + type: 8 + is_flaky: 0 + automation: 2 + steps: + - action: Delete the existing cluster + expectedresult: "" + data: "" + position: 1 + attachments: [] + - action: Verify cluster is deleted + expectedresult: "" + data: "" + position: 2 + attachments: [] + custom_field: + "14": Validation + "18": Hostbusters + - description: Deletes the init machine on an existing cluster + title: RKE2_Delete_Init_Machine + priority: 4 + type: 8 + is_flaky: 0 + automation: 2 + steps: + - action: Delete the init machine on an existing cluster + expectedresult: "" + data: "" + position: 1 + attachments: [] + - action: Verify cluster state + expectedresult: "" + data: "" + position: 2 + attachments: [] + custom_field: + "14": Validation + "18": Hostbusters \ No newline at end of file diff --git a/validation/deleting/rke2k3s/delete_init_machine.go b/validation/deleting/rke2k3s/delete_init_machine.go deleted file mode 100644 index 9b5861546..000000000 --- a/validation/deleting/rke2k3s/delete_init_machine.go +++ /dev/null @@ -1,38 +0,0 @@ -package rke2k3s - -import ( - "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/extensions/defaults" - "github.com/rancher/shepherd/extensions/defaults/stevetypes" - "github.com/rancher/shepherd/extensions/steve" - "github.com/rancher/tests/actions/machinepools" - "github.com/sirupsen/logrus" -) - -// DeleteInitMachine deletes the init machine from the specified cluster. 
-func DeleteInitMachine(client *rancher.Client, clusterID string) error { - initMachine, err := machinepools.GetInitMachine(client, clusterID) - if err != nil { - return err - } - - err = client.Steve.SteveType(stevetypes.Machine).Delete(initMachine) - if err != nil { - return err - } - - logrus.Debugf("Waiting for the init machine to be deleted on cluster (%s)", clusterID) - err = steve.WaitForResourceDeletion(client.Steve, initMachine, defaults.FiveHundredMillisecondTimeout, defaults.TenMinuteTimeout) - if err != nil { - return err - } - - logrus.Debugf("Waiting for the init machine to be replaced on cluster (%s)", clusterID) - err = clusters.WatchAndWaitForCluster(client, clusterID) - if err != nil { - return err - } - - return nil -} diff --git a/validation/deleting/rke2k3s/schemas/hostbusters_schemas.yaml b/validation/deleting/rke2k3s/schemas/hostbusters_schemas.yaml deleted file mode 100644 index 773cbce4a..000000000 --- a/validation/deleting/rke2k3s/schemas/hostbusters_schemas.yaml +++ /dev/null @@ -1,86 +0,0 @@ -- suite: Go Automation/Deleting - projects: [RRT, RM] - cases: - - description: Deletes an existing cluster - title: RKE2_Delete_Cluster - priority: 4 - type: 8 - is_flaky: 0 - automation: 2 - steps: - - action: Delete the existing cluster - expectedresult: "" - data: "" - position: 1 - attachments: [] - - action: Verify cluster is deleted - expectedresult: "" - data: "" - position: 2 - attachments: [] - custom_field: - "14": Validation - "18": Hostbusters - - - description: Deletes an existing cluster - title: K3S_Delete_Cluster - priority: 4 - type: 8 - is_flaky: 0 - automation: 2 - steps: - - action: Delete the existing cluster - expectedresult: "" - data: "" - position: 1 - attachments: [] - - action: Verify cluster is deleted - expectedresult: "" - data: "" - position: 2 - attachments: [] - custom_field: - "14": Validation - "18": Hostbusters - - - description: Deletes the init machine on an existing cluster - title: RKE2_Delete_Init_Machine - priority: 4 - type: 8 - is_flaky: 0 - automation: 2 - steps: - - action: Delete the init machine on an existing cluster - expectedresult: "" - data: "" - position: 1 - attachments: [] - - action: Verify cluster state - expectedresult: "" - data: "" - position: 2 - attachments: [] - custom_field: - "14": Validation - "18": Hostbusters - - - description: Deletes the init machine on an existing cluster - title: K3S_Delete_Init_Machine - priority: 4 - type: 8 - is_flaky: 0 - automation: 2 - steps: - - action: Delete the init machine on an existing cluster - expectedresult: "" - data: "" - position: 1 - attachments: [] - - action: Verify cluster state - expectedresult: "" - data: "" - position: 2 - attachments: [] - custom_field: - "14": Validation - "18": Hostbusters \ No newline at end of file
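---

Note on the deleted helper: the new per-distro suites import "github.com/rancher/tests/actions/clusters" and call clusters.DeleteInitMachine, so the rke2k3s helper removed above was relocated into that shared actions package rather than dropped. Below is a minimal sketch of how the relocated helper presumably looks, assuming the body was carried over unchanged apart from the package name and an import alias; the actual code in actions/clusters may differ.

```go
package clusters

import (
	"github.com/rancher/shepherd/clients/rancher"
	// Aliased because this package is itself named clusters; assumption: the
	// relocated code resolves the name clash the same way the test files do.
	extClusters "github.com/rancher/shepherd/extensions/clusters"
	"github.com/rancher/shepherd/extensions/defaults"
	"github.com/rancher/shepherd/extensions/defaults/stevetypes"
	"github.com/rancher/shepherd/extensions/steve"
	"github.com/rancher/tests/actions/machinepools"
	"github.com/sirupsen/logrus"
)

// DeleteInitMachine deletes the init machine from the specified cluster, waits
// for the machine to be gone, then waits for the cluster to settle again.
func DeleteInitMachine(client *rancher.Client, clusterID string) error {
	// Find the machine currently acting as the init node.
	initMachine, err := machinepools.GetInitMachine(client, clusterID)
	if err != nil {
		return err
	}

	// Delete it through the Steve API.
	err = client.Steve.SteveType(stevetypes.Machine).Delete(initMachine)
	if err != nil {
		return err
	}

	logrus.Debugf("Waiting for the init machine to be deleted on cluster (%s)", clusterID)
	err = steve.WaitForResourceDeletion(client.Steve, initMachine, defaults.FiveHundredMillisecondTimeout, defaults.TenMinuteTimeout)
	if err != nil {
		return err
	}

	logrus.Debugf("Waiting for the init machine to be replaced on cluster (%s)", clusterID)
	err = extClusters.WatchAndWaitForCluster(client, clusterID)
	if err != nil {
		return err
	}

	return nil
}
```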