Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions validation/provisioning/rke2/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -251,6 +251,27 @@ Hostname truncation test verifies that the node hostname is truncated properly.
#### Run Commands:
1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/provisioning/rke2 --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestHostnameTruncation -timeout=1h -v`


### vSphere Provisioning Test

#### Description:
vSphere Provisioning test verifies that RKE2 clusters can be provisioned on vSphere with various node configurations, including all-in-one, shared roles, dedicated roles, and Windows nodes.

#### Required Configurations:
1. [Cloud Credential](#cloud-credential-config) (vSphere credentials required)
2. [Cluster Config](#cluster-config) (provider must be set to "vsphere")
3. [Machine Config](#machine-config) (vSphere machine configuration required)

#### Table Tests:
1. `RKE2_vSphere|etcd_cp_worker` - Single node with all roles
2. `RKE2_vSphere|etcd_cp|worker` - Shared etcd/control plane nodes with separate worker nodes
3. `RKE2_vSphere|etcd|cp|worker` - Dedicated nodes for each role
4. `RKE2_vSphere|etcd|cp|worker|windows` - Dedicated roles with Windows worker nodes
5. `RKE2_vSphere|3_etcd|2_cp|3_worker` - Multiple nodes per role (3 etcd, 2 control plane, 3 worker)

#### Run Commands:
1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/provisioning/rke2 --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestVSphereProvisioning -timeout=1h -v`

### All Tests

#### Description:
Expand Down
150 changes: 150 additions & 0 deletions validation/provisioning/rke2/vsphere_provisioning_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,150 @@
//go:build validation || recurring

package rke2

import (
"os"
"testing"

"github.com/rancher/shepherd/clients/rancher"
"github.com/rancher/shepherd/extensions/cloudcredentials"
"github.com/rancher/shepherd/pkg/config"
"github.com/rancher/shepherd/pkg/config/operations"
"github.com/rancher/shepherd/pkg/session"
"github.com/rancher/tests/actions/clusters"
"github.com/rancher/tests/actions/config/defaults"
"github.com/rancher/tests/actions/logging"
"github.com/rancher/tests/actions/provisioning"
"github.com/rancher/tests/actions/provisioninginput"
"github.com/rancher/tests/actions/qase"
"github.com/rancher/tests/actions/workloads"
"github.com/rancher/tests/actions/workloads/deployment"
"github.com/rancher/tests/actions/workloads/pods"
standard "github.com/rancher/tests/validation/provisioning/resources/standarduser"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
)

// vSphereProvisioningTest holds the shared fixtures used by the vSphere
// RKE2 provisioning tests: the admin and standard-user Rancher clients,
// the session that owns created resources, and the loaded cattle config.
type vSphereProvisioningTest struct {
	client             *rancher.Client // admin-level Rancher client
	session            *session.Session // owns cleanup of everything provisioned in the test
	standardUserClient *rancher.Client // client scoped to a freshly created standard user
	cattleConfig       map[string]any  // raw config map loaded from CATTLE_TEST_CONFIG with defaults applied
}

// vSphereProvisioningSetup builds the shared fixture for the vSphere
// provisioning tests: it opens a session, creates an admin Rancher client,
// loads the cattle config (applying package and RKE2 Kubernetes defaults),
// configures logging, and creates a standard-user client.
func vSphereProvisioningSetup(t *testing.T) vSphereProvisioningTest {
	testSession := session.NewSession()

	adminClient, err := rancher.NewClient("", testSession)
	require.NoError(t, err)

	// Load the raw config file, then layer package defaults on top of it.
	cattleConfig := config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey))

	cattleConfig, err = defaults.LoadPackageDefaults(cattleConfig, "")
	require.NoError(t, err)

	// Configure the process-wide logger from the config's logging section.
	loggingConfig := new(logging.Logging)
	operations.LoadObjectFromMap(logging.LoggingKey, cattleConfig, loggingConfig)
	require.NoError(t, logging.SetLogger(loggingConfig))

	// Pin the Kubernetes version default for RKE2 clusters.
	cattleConfig, err = defaults.SetK8sDefault(adminClient, defaults.RKE2, cattleConfig)
	require.NoError(t, err)

	// Provision under a standard (non-admin) user, as the tests expect.
	userClient, _, _, err := standard.CreateStandardUser(adminClient)
	require.NoError(t, err)

	return vSphereProvisioningTest{
		client:             adminClient,
		session:            testSession,
		standardUserClient: userClient,
		cattleConfig:       cattleConfig,
	}
}

// TestVSphereProvisioning provisions RKE2 clusters on vSphere across several
// machine-pool layouts (all-in-one, shared etcd/control-plane, dedicated
// roles, Windows workers, and multi-node pools) and verifies each cluster is
// ready, its deployments and pods are healthy, and workloads can be created.
func TestVSphereProvisioning(t *testing.T) {
	t.Parallel()
	r := vSphereProvisioningSetup(t)

	// Register session cleanup exactly once. Because the subtests run in
	// parallel and share r.session, this must not be registered per table
	// row — doing so would invoke r.session.Cleanup() once per row. The
	// parent's cleanup runs only after all parallel subtests finish.
	t.Cleanup(func() {
		logrus.Info("Running cleanup")
		r.session.Cleanup()
	})

	// This test requires a vSphere-backed cluster config; skip otherwise.
	clusterConfig := new(clusters.ClusterConfig)
	operations.LoadObjectFromMap(defaults.ClusterConfigKey, r.cattleConfig, clusterConfig)
	if clusterConfig.Provider != "vsphere" {
		t.Skip("vSphere provisioning test requires access to vsphere")
	}

	nodeRolesAll := []provisioninginput.MachinePools{provisioninginput.AllRolesMachinePool}
	nodeRolesShared := []provisioninginput.MachinePools{provisioninginput.EtcdControlPlaneMachinePool, provisioninginput.WorkerMachinePool}
	nodeRolesDedicated := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool}
	nodeRolesWindows := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool, provisioninginput.WindowsMachinePool}
	nodeRolesStandard := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool}

	// Scale the "standard" layout: 3 etcd, 2 control plane, 3 worker.
	nodeRolesStandard[0].MachinePoolConfig.Quantity = 3
	nodeRolesStandard[1].MachinePoolConfig.Quantity = 2
	nodeRolesStandard[2].MachinePoolConfig.Quantity = 3

	tests := []struct {
		name         string
		machinePools []provisioninginput.MachinePools
		client       *rancher.Client
		isWindows    bool
	}{
		{"RKE2_vSphere|etcd_cp_worker", nodeRolesAll, r.standardUserClient, false},
		{"RKE2_vSphere|etcd_cp|worker", nodeRolesShared, r.standardUserClient, false},
		{"RKE2_vSphere|etcd|cp|worker", nodeRolesDedicated, r.standardUserClient, false},
		{"RKE2_vSphere|etcd|cp|worker|windows", nodeRolesWindows, r.standardUserClient, true},
		{"RKE2_vSphere|3_etcd|2_cp|3_worker", nodeRolesStandard, r.standardUserClient, false},
	}

	for _, tt := range tests {
		tt := tt // capture loop variable (pre-Go 1.22 semantics)

		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()

			// Each subtest gets its own cluster config copy so the shared
			// cattleConfig map is not mutated across parallel subtests.
			clusterConfig := new(clusters.ClusterConfig)
			operations.LoadObjectFromMap(defaults.ClusterConfigKey, r.cattleConfig, clusterConfig)
			clusterConfig.MachinePools = tt.machinePools

			provider := provisioning.CreateProvider(clusterConfig.Provider)
			credentialSpec := cloudcredentials.LoadCloudCredential(string(provider.Name))
			machineConfigSpec := provider.LoadMachineConfigFunc(r.cattleConfig)

			logrus.Info("Provisioning vSphere cluster")
			cluster, err := provisioning.CreateProvisioningCluster(tt.client, provider, credentialSpec, clusterConfig, machineConfigSpec, nil)
			require.NoError(t, err)

			logrus.Infof("Verifying the cluster is ready (%s)", cluster.Name)
			provisioning.VerifyClusterReady(t, r.client, cluster)

			logrus.Infof("Verifying cluster deployments (%s)", cluster.Name)
			err = deployment.VerifyClusterDeployments(tt.client, cluster)
			require.NoError(t, err)

			logrus.Infof("Verifying cluster pods (%s)", cluster.Name)
			err = pods.VerifyClusterPods(r.client, cluster)
			require.NoError(t, err)

			workloadConfigs := new(workloads.Workloads)
			operations.LoadObjectFromMap(workloads.WorkloadsConfigurationFileKey, r.cattleConfig, workloadConfigs)

			logrus.Infof("Creating workloads (%s)", cluster.Name)
			workloadConfigs, err = workloads.CreateWorkloads(r.client, cluster.Name, *workloadConfigs)
			require.NoError(t, err)

			logrus.Infof("Verifying workloads (%s)", cluster.Name)
			_, err = workloads.VerifyWorkloads(r.client, cluster.Name, *workloadConfigs)
			require.NoError(t, err)
		})

		// Upload schema parameters for reporting; a failure here is logged
		// but does not fail the test. Note the params are derived from the
		// config, not from the (still-pending) parallel subtest above.
		params := provisioning.GetProvisioningSchemaParams(tt.client, r.cattleConfig)
		if err := qase.UpdateSchemaParameters(tt.name, params); err != nil {
			logrus.Warningf("Failed to upload schema parameters %s", err)
		}
	}
}