From 912ab8743bbf7bf21024b936f9b8a67f42da0d91 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Fri, 23 Jan 2026 21:41:46 +0000
Subject: [PATCH 1/3] Initial plan

From 5f5ed152902a2a967649539fccbc3a83cedaddd8 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Fri, 23 Jan 2026 21:46:04 +0000
Subject: [PATCH 2/3] Add vSphere RKE2 provisioning test case with documentation

Co-authored-by: slickwarren <16691014+slickwarren@users.noreply.github.com>
---
 validation/provisioning/rke2/README.md        |  21 +++
 .../rke2/vsphere_provisioning_test.go         | 149 ++++++++++++++++++
 2 files changed, 170 insertions(+)
 create mode 100644 validation/provisioning/rke2/vsphere_provisioning_test.go

diff --git a/validation/provisioning/rke2/README.md b/validation/provisioning/rke2/README.md
index 2c5cb4600..4181034ee 100644
--- a/validation/provisioning/rke2/README.md
+++ b/validation/provisioning/rke2/README.md
@@ -251,6 +251,27 @@ Hostname truncation test verifies that the node hostname is truncated properly.
 
 #### Run Commands:
 1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/provisioning/rke2 --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestHostnameTruncation -timeout=1h -v`
+
+### vSphere Provisioning Test
+
+#### Description:
+vSphere Provisioning test verifies that RKE2 clusters can be provisioned on vSphere with various node configurations, including all-in-one, shared roles, dedicated roles, and Windows nodes.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config) (vSphere credentials required)
+2. [Cluster Config](#cluster-config) (provider must be set to "vsphere")
+3. [Machine Config](#machine-config) (vSphere machine configuration required)
+
+#### Table Tests:
+1. `RKE2_vSphere|etcd_cp_worker` - Single node with all roles
+2. `RKE2_vSphere|etcd_cp|worker` - Shared etcd/control plane nodes with separate worker nodes
+3. `RKE2_vSphere|etcd|cp|worker` - Dedicated nodes for each role
+4. `RKE2_vSphere|etcd|cp|worker|windows` - Dedicated roles with Windows worker nodes
+5. `RKE2_vSphere|3_etcd|2_cp|3_worker` - Multiple nodes per role (3 etcd, 2 control plane, 3 worker)
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/provisioning/rke2 --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestVSphereProvisioning -timeout=1h -v`
+
 ### All Tests
 
 #### Description:
diff --git a/validation/provisioning/rke2/vsphere_provisioning_test.go b/validation/provisioning/rke2/vsphere_provisioning_test.go
new file mode 100644
index 000000000..bf5b1c5ef
--- /dev/null
+++ b/validation/provisioning/rke2/vsphere_provisioning_test.go
@@ -0,0 +1,149 @@
+//go:build validation || recurring
+
+package rke2
+
+import (
+	"os"
+	"testing"
+
+	"github.com/rancher/shepherd/clients/rancher"
+	"github.com/rancher/shepherd/extensions/cloudcredentials"
+	"github.com/rancher/shepherd/pkg/config"
+	"github.com/rancher/shepherd/pkg/config/operations"
+	"github.com/rancher/shepherd/pkg/session"
+	"github.com/rancher/tests/actions/clusters"
+	"github.com/rancher/tests/actions/config/defaults"
+	"github.com/rancher/tests/actions/logging"
+	"github.com/rancher/tests/actions/provisioning"
+	"github.com/rancher/tests/actions/provisioninginput"
+	"github.com/rancher/tests/actions/qase"
+	"github.com/rancher/tests/actions/workloads"
+	"github.com/rancher/tests/actions/workloads/deployment"
+	"github.com/rancher/tests/actions/workloads/pods"
+	standard "github.com/rancher/tests/validation/provisioning/resources/standarduser"
+	"github.com/sirupsen/logrus"
+	"github.com/stretchr/testify/require"
+)
+
+type vSphereProvisioningTest struct {
+	client             *rancher.Client
+	session            *session.Session
+	standardUserClient *rancher.Client
+	cattleConfig       map[string]any
+}
+
+func vSphereProvisioningSetup(t *testing.T) vSphereProvisioningTest {
+	var r vSphereProvisioningTest
+	testSession := session.NewSession()
+	r.session = testSession
+
+	client, err := rancher.NewClient("", testSession)
+	require.NoError(t, err)
+	r.client = client
+
+	r.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey))
+
+	r.cattleConfig, err = defaults.LoadPackageDefaults(r.cattleConfig, "")
+	require.NoError(t, err)
+
+	loggingConfig := new(logging.Logging)
+	operations.LoadObjectFromMap(logging.LoggingKey, r.cattleConfig, loggingConfig)
+
+	err = logging.SetLogger(loggingConfig)
+	require.NoError(t, err)
+
+	r.cattleConfig, err = defaults.SetK8sDefault(r.client, defaults.RKE2, r.cattleConfig)
+	require.NoError(t, err)
+
+	r.standardUserClient, _, _, err = standard.CreateStandardUser(r.client)
+	require.NoError(t, err)
+
+	return r
+}
+
+func TestVSphereProvisioning(t *testing.T) {
+	t.Parallel()
+	r := vSphereProvisioningSetup(t)
+
+	// Verify provider is vsphere
+	clusterConfig := new(clusters.ClusterConfig)
+	operations.LoadObjectFromMap(defaults.ClusterConfigKey, r.cattleConfig, clusterConfig)
+	if clusterConfig.Provider != "vsphere" {
+		t.Skip("vSphere provisioning test requires access to vsphere")
+	}
+
+	nodeRolesAll := []provisioninginput.MachinePools{provisioninginput.AllRolesMachinePool}
+	nodeRolesShared := []provisioninginput.MachinePools{provisioninginput.EtcdControlPlaneMachinePool, provisioninginput.WorkerMachinePool}
+	nodeRolesDedicated := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool}
+	nodeRolesWindows := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool, provisioninginput.WindowsMachinePool}
+	nodeRolesStandard := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool}
+
+	nodeRolesStandard[0].MachinePoolConfig.Quantity = 3
+	nodeRolesStandard[1].MachinePoolConfig.Quantity = 2
+	nodeRolesStandard[2].MachinePoolConfig.Quantity = 3
+
+	tests := []struct {
+		name         string
+		machinePools []provisioninginput.MachinePools
+		client       *rancher.Client
+		isWindows    bool
+	}{
+		{"RKE2_vSphere|etcd_cp_worker", nodeRolesAll, r.standardUserClient, false},
+		{"RKE2_vSphere|etcd_cp|worker", nodeRolesShared, r.standardUserClient, false},
+		{"RKE2_vSphere|etcd|cp|worker", nodeRolesDedicated, r.standardUserClient, false},
+		{"RKE2_vSphere|etcd|cp|worker|windows", nodeRolesWindows, r.standardUserClient, true},
+		{"RKE2_vSphere|3_etcd|2_cp|3_worker", nodeRolesStandard, r.standardUserClient, false},
+	}
+
+	for _, tt := range tests {
+		var err error
+		t.Cleanup(func() {
+			logrus.Infof("Running cleanup (%s)", tt.name)
+			r.session.Cleanup()
+		})
+
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+
+			clusterConfig := new(clusters.ClusterConfig)
+			operations.LoadObjectFromMap(defaults.ClusterConfigKey, r.cattleConfig, clusterConfig)
+			clusterConfig.MachinePools = tt.machinePools
+
+			provider := provisioning.CreateProvider(clusterConfig.Provider)
+			credentialSpec := cloudcredentials.LoadCloudCredential(string(provider.Name))
+			machineConfigSpec := provider.LoadMachineConfigFunc(r.cattleConfig)
+
+			logrus.Info("Provisioning vSphere cluster")
+			cluster, err := provisioning.CreateProvisioningCluster(tt.client, provider, credentialSpec, clusterConfig, machineConfigSpec, nil)
+			require.NoError(t, err)
+
+			logrus.Infof("Verifying the cluster is ready (%s)", cluster.Name)
+			provisioning.VerifyClusterReady(t, r.client, cluster)
+
+			logrus.Infof("Verifying cluster deployments (%s)", cluster.Name)
+			err = deployment.VerifyClusterDeployments(tt.client, cluster)
+			require.NoError(t, err)
+
+			logrus.Infof("Verifying cluster pods (%s)", cluster.Name)
+			err = pods.VerifyClusterPods(r.client, cluster)
+			require.NoError(t, err)
+
+			workloadConfigs := new(workloads.Workloads)
+			operations.LoadObjectFromMap(workloads.WorkloadsConfigurationFileKey, r.cattleConfig, workloadConfigs)
+
+			logrus.Infof("Creating workloads (%s)", cluster.Name)
+			workloadConfigs, err = workloads.CreateWorkloads(r.client, cluster.Name, *workloadConfigs)
+			require.NoError(t, err)
+
+			logrus.Infof("Verifying workloads (%s)", cluster.Name)
+			_, err = workloads.VerifyWorkloads(r.client, cluster.Name, *workloadConfigs)
+			require.NoError(t, err)
+		})
+
+		params := provisioning.GetProvisioningSchemaParams(tt.client, r.cattleConfig)
+		err = qase.UpdateSchemaParameters(tt.name, params)
+		if err != nil {
+			logrus.Warningf("Failed to upload schema parameters %s", err)
+		}
+	}
+}

From 5868b54d7c51285e07e40a1bd36cb0c0eb64ae9c Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Fri, 23 Jan 2026 21:47:28 +0000
Subject: [PATCH 3/3] Fix loop variable capture in vSphere provisioning test

Co-authored-by: slickwarren <16691014+slickwarren@users.noreply.github.com>
---
 validation/provisioning/rke2/vsphere_provisioning_test.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/validation/provisioning/rke2/vsphere_provisioning_test.go b/validation/provisioning/rke2/vsphere_provisioning_test.go
index bf5b1c5ef..ee29884ad 100644
--- a/validation/provisioning/rke2/vsphere_provisioning_test.go
+++ b/validation/provisioning/rke2/vsphere_provisioning_test.go
@@ -96,6 +96,7 @@ func TestVSphereProvisioning(t *testing.T) {
 	}
 
 	for _, tt := range tests {
+		tt := tt // capture loop variable
 		var err error
 		t.Cleanup(func() {
 			logrus.Infof("Running cleanup (%s)", tt.name)
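
Reviewer note on PATCH 3/3: before Go 1.22, a range clause reuses a single `tt` variable across all iterations, so parallel subtests that close over `tt` can all observe the value from the final iteration by the time they run; the added `tt := tt` line gives each iteration its own copy. Below is a minimal, self-contained sketch of that failure mode (file and test names are hypothetical and not part of this patch series):

// loopcapture_example_test.go - illustrative only; names are hypothetical.
package example

import "testing"

func TestLoopCapture(t *testing.T) {
	tests := []struct {
		name string
		want string
	}{
		{"first", "a"},
		{"second", "b"},
		{"third", "c"},
	}

	for _, tt := range tests {
		tt := tt // capture loop variable; required before Go 1.22
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			// Without the capture above, these parallel subtests share the
			// single range variable and may all read the final element
			// ({"third", "c"}) once the outer loop has finished.
			if tt.want == "" {
				t.Errorf("empty want for %s", tt.name)
			}
		})
	}
}

With Go 1.22 or newer the loop variable is scoped per iteration, so the capture line becomes redundant but harmless.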