From 40cc4257431b095d2846039a00c152bb76c895f6 Mon Sep 17 00:00:00 2001 From: github-actions Date: Wed, 21 Jan 2026 15:50:54 -0800 Subject: [PATCH 1/4] adding || pit.harvester.daily tag to relevant tests --- validation/fleet/public_gitrepo_test.go | 2 +- .../provisioning/cloud_provider_test.go | 116 ++++++++++++++++++ .../chartinstall/installation_test.go | 2 +- .../connectivity/network_policy_test.go | 2 +- validation/workloads/workload_test.go | 2 +- 5 files changed, 120 insertions(+), 4 deletions(-) create mode 100644 validation/harvester/provisioning/cloud_provider_test.go diff --git a/validation/fleet/public_gitrepo_test.go b/validation/fleet/public_gitrepo_test.go index ce35f184a..296c71f0a 100644 --- a/validation/fleet/public_gitrepo_test.go +++ b/validation/fleet/public_gitrepo_test.go @@ -1,4 +1,4 @@ -//go:build validation || sanity || pit.daily +//go:build validation || sanity || pit.daily || pit.harvester.daily package fleet diff --git a/validation/harvester/provisioning/cloud_provider_test.go b/validation/harvester/provisioning/cloud_provider_test.go new file mode 100644 index 000000000..3b10051db --- /dev/null +++ b/validation/harvester/provisioning/cloud_provider_test.go @@ -0,0 +1,116 @@ +//go:build validation || recurring || pit.harvester.daily + +package harvester + +import ( + "os" + "testing" + + "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/cloudcredentials" + "github.com/rancher/shepherd/extensions/defaults/providers" + "github.com/rancher/shepherd/pkg/config" + "github.com/rancher/shepherd/pkg/config/operations" + "github.com/rancher/shepherd/pkg/session" + "github.com/rancher/tests/actions/clusters" + "github.com/rancher/tests/actions/config/defaults" + "github.com/rancher/tests/actions/provisioning" + "github.com/rancher/tests/actions/provisioninginput" + "github.com/rancher/tests/actions/qase" + "github.com/rancher/tests/actions/workloads/deployment" + "github.com/rancher/tests/actions/workloads/pods" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type HarvesterProvisioningTestSuite struct { + suite.Suite + client *rancher.Client + session *session.Session + cattleConfig map[string]any +} + + +func (p *HarvesterProvisioningTestSuite) TearDownSuite() { + p.session.Cleanup() +} +func (p *HarvesterProvisioningTestSuite) SetupSuite() { + testSession := session.NewSession() + p.session = testSession + + client, err := rancher.NewClient("", testSession) + require.NoError(p.T(), err) + + p.client = client + + p.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey)) + + p.cattleConfig, err = defaults.SetK8sDefault(client, defaults.RKE2, p.cattleConfig) + require.NoError(p.T(), err) +} + +func (p *HarvesterProvisioningTestSuite) TestHarvesterCloudProvider() { + + nodeRolesDedicated := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool} + nodeRolesDedicated[0].MachinePoolConfig.Quantity = 1 + nodeRolesDedicated[1].MachinePoolConfig.Quantity = 2 + nodeRolesDedicated[2].MachinePoolConfig.Quantity = 2 + + tests := []struct { + name string + machinePools []provisioninginput.MachinePools + client *rancher.Client + }{ + {"Harvester_oot", nodeRolesDedicated, p.client}, + } + + clusterConfig := new(clusters.ClusterConfig) + operations.LoadObjectFromMap(defaults.ClusterConfigKey, p.cattleConfig, clusterConfig) + if clusterConfig.Provider != "harvester" { + 
p.T().Skip("Harvester Cloud Provider test requires access to harvester.") + } + + for _, tt := range tests { + var err error + + clusterConfig.Provider = providers.Harvester + clusterConfig.MachinePools = tt.machinePools + + provider := provisioning.CreateProvider(clusterConfig.Provider) + credentialSpec := cloudcredentials.LoadCloudCredential(string(provider.Name)) + machineConfigSpec := provider.LoadMachineConfigFunc(p.cattleConfig) + + logrus.Infof("Provisioning cluster") + cluster, err := provisioning.CreateProvisioningCluster(tt.client, provider, credentialSpec, clusterConfig, machineConfigSpec, nil) + require.NoError(p.T(), err) + + logrus.Infof("Verifying the cluster is ready (%s)", cluster.Name) + provisioning.VerifyClusterReady(p.T(), p.client, cluster) + + logrus.Infof("Verifying cluster deployments (%s)", cluster.Name) + err = deployment.VerifyClusterDeployments(tt.client, cluster) + require.NoError(p.T(), err) + + logrus.Infof("Verifying cluster pods (%s)", cluster.Name) + err = pods.VerifyClusterPods(p.client, cluster) + require.NoError(p.T(), err) + + logrus.Infof("Verifying cloud provider (%s)", cluster.Name) + provider.VerifyCloudProviderFunc(p.T(), p.client, cluster) + + + params := provisioning.GetProvisioningSchemaParams(tt.client, p.cattleConfig) + err = qase.UpdateSchemaParameters(tt.name, params) + if err != nil { + logrus.Warningf("Failed to upload schema parameters %s", err) + } + } +} + + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestHarvesterProvisioningTestSuite(t *testing.T) { + suite.Run(t, new(HarvesterProvisioningTestSuite)) +} diff --git a/validation/longhorn/chartinstall/installation_test.go b/validation/longhorn/chartinstall/installation_test.go index 1ed4c826d..e02ed22ab 100644 --- a/validation/longhorn/chartinstall/installation_test.go +++ b/validation/longhorn/chartinstall/installation_test.go @@ -1,4 +1,4 @@ -//go:build validation || pit.daily +//go:build validation || pit.daily || pit.harvester.daily package longhorn diff --git a/validation/networking/connectivity/network_policy_test.go b/validation/networking/connectivity/network_policy_test.go index fc9a7b954..8a28acb52 100644 --- a/validation/networking/connectivity/network_policy_test.go +++ b/validation/networking/connectivity/network_policy_test.go @@ -1,4 +1,4 @@ -//go:build (validation || infra.rke2k3s || cluster.any || sanity || pit.daily) && !stress && !extended +//go:build (validation || infra.rke2k3s || cluster.any || sanity || pit.daily || pit.harvester.daily) && !stress && !extended package connectivity diff --git a/validation/workloads/workload_test.go b/validation/workloads/workload_test.go index 2f0d734a0..f654a709d 100644 --- a/validation/workloads/workload_test.go +++ b/validation/workloads/workload_test.go @@ -1,4 +1,4 @@ -//go:build (validation || infra.any || cluster.any || sanity || pit.daily) && !stress && !extended +//go:build (validation || infra.any || cluster.any || sanity || pit.daily || pit.harvester.daily) && !stress && !extended package workloads From 4a58b3c9315937d1a2fbf8106ad50c07097e2d6e Mon Sep 17 00:00:00 2001 From: github-actions Date: Thu, 22 Jan 2026 14:45:33 -0800 Subject: [PATCH 2/4] init commit; instructions + scripts for building harvester ISOs --- .../README.md | 34 ++++++++++++++ .../build-harvester-image.sh | 45 +++++++++++++++++++ .../files.conf | 14 ++++++ .../setup-harvester-dev-build.sh | 36 +++++++++++++++ 4 files changed, 129 insertions(+) create mode 100644 
scripts/build_and_serve_harvester_images/README.md
 create mode 100644 scripts/build_and_serve_harvester_images/build-harvester-image.sh
 create mode 100644 scripts/build_and_serve_harvester_images/files.conf
 create mode 100644 scripts/build_and_serve_harvester_images/setup-harvester-dev-build.sh

diff --git a/scripts/build_and_serve_harvester_images/README.md b/scripts/build_and_serve_harvester_images/README.md
new file mode 100644
index 000000000..be72ad83c
--- /dev/null
+++ b/scripts/build_and_serve_harvester_images/README.md
@@ -0,0 +1,34 @@
+# Build and Serve -- Harvester Development Builds
+
+## Prerequisites
+
+* a machine that:
+  * has at least 100 GB of free disk space
+  * has nginx installed/accessible
+
+## Setup
+
+### Dev for Harvester Builds
+
+Everything should be included in the `setup-harvester-dev-build.sh` script. Simply copy it to your machine, `chmod 777` the file, and run it.
+
+## Building Harvester
+
+Again, everything should be included in the script. The script expects a small disk, so it wipes the artifacts from the previous run each time.
+Copy over `build-harvester-image.sh`, `chmod 777` it, and run it.
+If you want this to run regularly, you can set up a cron job for it via `crontab -e`.
+
+## Serving the ISOs
+
+First, ensure nginx is running: `sudo systemctl status nginx`
+
+We need to ensure nginx has permission to read the file(s) we want to share. **Note:** this may not be strictly necessary, but run it anyway just in case.
+
+```bash
+sudo usermod -aG webfiles www-data
+sudo chgrp -R webfiles /home/ubuntu/harvester
+```
+
+Next, add `files.conf` to nginx; it should live in `/etc/nginx/conf.d/`. Update the port in `files.conf` if you'd like.
+Then, test the changes: `sudo nginx -t` should report that the configuration is valid.
+Finally, you should be able to visit your node's IP address on the port you selected. You should see a single folder called `latest` that contains the ISO, among other build artifacts.
diff --git a/scripts/build_and_serve_harvester_images/build-harvester-image.sh b/scripts/build_and_serve_harvester_images/build-harvester-image.sh
new file mode 100644
index 000000000..e1eac877a
--- /dev/null
+++ b/scripts/build_and_serve_harvester_images/build-harvester-image.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+cd harvester
+
+git checkout v1.7
+git fetch
+
+before=$(git rev-parse HEAD)
+output=$(git rebase origin/v1.7 2>&1)
+after=$(git rev-parse HEAD)
+
+if [[ "$before" == "$after" ]]; then
+    exit 0
+fi
+
+echo "$output"
+
+docker system prune -f
+docker volume prune -f
+
+cd dist/artifacts/
+
+rm -rf old/
+rm -rf latest/
+
+cd ../../
+
+# make can be run beforehand, but it shouldn't be necessary if we only want images.
+# make
+make build-iso
+sleep 10
+
+cd dist/artifacts/
+
+mkdir latest
+mv harvester-* latest
+cd latest
+
+for file in *master*; do
+    mv "$file" "${file//master/latest}"
+done
+
+cd ..
+mv "image-lists-amd64.tar.gz" latest/ + diff --git a/scripts/build_and_serve_harvester_images/files.conf b/scripts/build_and_serve_harvester_images/files.conf new file mode 100644 index 000000000..b83278b52 --- /dev/null +++ b/scripts/build_and_serve_harvester_images/files.conf @@ -0,0 +1,14 @@ +server { + # this can be any port you want, but this was originally setup on a rke2 cluster using nodeport so I kept the format/range + listen 32309; + listen [::]:32309; + server_name _; + + root /home/ubuntu/harvester/dist/artifacts; + + location / { + autoindex on; + autoindex_exact_size off; + autoindex_localtime on; + } +} diff --git a/scripts/build_and_serve_harvester_images/setup-harvester-dev-build.sh b/scripts/build_and_serve_harvester_images/setup-harvester-dev-build.sh new file mode 100644 index 000000000..33673ca97 --- /dev/null +++ b/scripts/build_and_serve_harvester_images/setup-harvester-dev-build.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +# Add Docker's official GPG key: +sudo apt update +sudo apt install ca-certificates curl make nginx +sudo install -m 0755 -d /etc/apt/keyrings +sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc +sudo chmod a+r /etc/apt/keyrings/docker.asc + +# Add the repository to Apt sources: +sudo tee /etc/apt/sources.list.d/docker.sources <> .bashrc + +git clone https://github.com/harvester/harvester.git + +cd harvester + +git checkout v1.7 From 3cc7323b4306ffa4123a4ab309e1804318681e6f Mon Sep 17 00:00:00 2001 From: github-actions Date: Fri, 23 Jan 2026 09:23:07 -0800 Subject: [PATCH 3/4] adjusting schema; rm'd unnecessary variables --- TAG_GUIDE.md | 3 + .../provisioning/cloud_provider_test.go | 74 ++++++++----------- .../harvester/schemas/harvester_schemas.yaml | 4 +- 3 files changed, 34 insertions(+), 47 deletions(-) diff --git a/TAG_GUIDE.md b/TAG_GUIDE.md index 9c06fe198..f8dd5e7c0 100644 --- a/TAG_GUIDE.md +++ b/TAG_GUIDE.md @@ -99,6 +99,9 @@ The pit tags organizes Go tests using **build tags**. 
These tags define when and ##### pit.daily - Tests that should run **every day** +##### pit.harvester.daily +- Tests that should run **every day** on a harvester setup + ##### pit.weekly - Tests that should run **once per week** diff --git a/validation/harvester/provisioning/cloud_provider_test.go b/validation/harvester/provisioning/cloud_provider_test.go index 3b10051db..a6f586acc 100644 --- a/validation/harvester/provisioning/cloud_provider_test.go +++ b/validation/harvester/provisioning/cloud_provider_test.go @@ -26,15 +26,15 @@ import ( type HarvesterProvisioningTestSuite struct { suite.Suite - client *rancher.Client - session *session.Session - cattleConfig map[string]any + client *rancher.Client + session *session.Session + cattleConfig map[string]any } - func (p *HarvesterProvisioningTestSuite) TearDownSuite() { p.session.Cleanup() } + func (p *HarvesterProvisioningTestSuite) SetupSuite() { testSession := session.NewSession() p.session = testSession @@ -50,64 +50,48 @@ func (p *HarvesterProvisioningTestSuite) SetupSuite() { require.NoError(p.T(), err) } -func (p *HarvesterProvisioningTestSuite) TestHarvesterCloudProvider() { +func (p *HarvesterProvisioningTestSuite) TestCloudProvider() { nodeRolesDedicated := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool} nodeRolesDedicated[0].MachinePoolConfig.Quantity = 1 nodeRolesDedicated[1].MachinePoolConfig.Quantity = 2 nodeRolesDedicated[2].MachinePoolConfig.Quantity = 2 - - tests := []struct { - name string - machinePools []provisioninginput.MachinePools - client *rancher.Client - }{ - {"Harvester_oot", nodeRolesDedicated, p.client}, - } - clusterConfig := new(clusters.ClusterConfig) operations.LoadObjectFromMap(defaults.ClusterConfigKey, p.cattleConfig, clusterConfig) - if clusterConfig.Provider != "harvester" { - p.T().Skip("Harvester Cloud Provider test requires access to harvester.") - } + var err error - for _, tt := range tests { - var err error + clusterConfig.Provider = providers.Harvester + clusterConfig.MachinePools = nodeRolesDedicated - clusterConfig.Provider = providers.Harvester - clusterConfig.MachinePools = tt.machinePools + provider := provisioning.CreateProvider(clusterConfig.Provider) + credentialSpec := cloudcredentials.LoadCloudCredential(string(provider.Name)) + machineConfigSpec := provider.LoadMachineConfigFunc(p.cattleConfig) - provider := provisioning.CreateProvider(clusterConfig.Provider) - credentialSpec := cloudcredentials.LoadCloudCredential(string(provider.Name)) - machineConfigSpec := provider.LoadMachineConfigFunc(p.cattleConfig) - - logrus.Infof("Provisioning cluster") - cluster, err := provisioning.CreateProvisioningCluster(tt.client, provider, credentialSpec, clusterConfig, machineConfigSpec, nil) - require.NoError(p.T(), err) + logrus.Infof("Provisioning cluster") + cluster, err := provisioning.CreateProvisioningCluster(p.client, provider, credentialSpec, clusterConfig, machineConfigSpec, nil) + require.NoError(p.T(), err) - logrus.Infof("Verifying the cluster is ready (%s)", cluster.Name) - provisioning.VerifyClusterReady(p.T(), p.client, cluster) + logrus.Infof("Verifying the cluster is ready (%s)", cluster.Name) + provisioning.VerifyClusterReady(p.T(), p.client, cluster) - logrus.Infof("Verifying cluster deployments (%s)", cluster.Name) - err = deployment.VerifyClusterDeployments(tt.client, cluster) - require.NoError(p.T(), err) + logrus.Infof("Verifying cluster deployments (%s)", cluster.Name) + err = 
deployment.VerifyClusterDeployments(p.client, cluster) + require.NoError(p.T(), err) - logrus.Infof("Verifying cluster pods (%s)", cluster.Name) - err = pods.VerifyClusterPods(p.client, cluster) - require.NoError(p.T(), err) + logrus.Infof("Verifying cluster pods (%s)", cluster.Name) + err = pods.VerifyClusterPods(p.client, cluster) + require.NoError(p.T(), err) - logrus.Infof("Verifying cloud provider (%s)", cluster.Name) - provider.VerifyCloudProviderFunc(p.T(), p.client, cluster) - + logrus.Infof("Verifying cloud provider (%s)", cluster.Name) + provider.VerifyCloudProviderFunc(p.T(), p.client, cluster) - params := provisioning.GetProvisioningSchemaParams(tt.client, p.cattleConfig) - err = qase.UpdateSchemaParameters(tt.name, params) - if err != nil { - logrus.Warningf("Failed to upload schema parameters %s", err) - } + params := provisioning.GetProvisioningSchemaParams(p.client, p.cattleConfig) + err = qase.UpdateSchemaParameters("Harvester_oot", params) + if err != nil { + logrus.Warningf("Failed to upload schema parameters %s", err) } -} +} // In order for 'go test' to run this suite, we need to create // a normal test function and pass our suite to suite.Run diff --git a/validation/harvester/schemas/harvester_schemas.yaml b/validation/harvester/schemas/harvester_schemas.yaml index 4c275eaa6..710ad3383 100644 --- a/validation/harvester/schemas/harvester_schemas.yaml +++ b/validation/harvester/schemas/harvester_schemas.yaml @@ -34,7 +34,7 @@ custom_field: "15": "TestRKE2ProvisioningTestSuite TestK3SProvisioningTestSuite TestRKE1ProvisioningTestSuite" - title: "Node Driver Cloud Provider RKE2 - Loadbalancing and Storage" - description: "TestRKE2ProvisioningTestSuite TestK3SProvisioningTestSuite TestRKE1ProvisioningTestSuite" + description: "HarvesterProvisioningTestSuite" automation: 2 steps: - action: "Prerequisites" @@ -50,7 +50,7 @@ expectedresult: "writing and reading file(s) in the mountpoint are successful" position: 3 custom_field: - "15": "TestRKE2ProvisioningTestSuite TestK3SProvisioningTestSuite TestRKE1ProvisioningTestSuite" + "15": "HarvesterProvisioningTestSuite" - title: "Custom Cluster Provisioning" description: "TestCustomClusterRKE2ProvisioningTestSuite TestCustomClusterK3SProvisioningTestSuite TestCustomClusterRKE1ProvisioningTestSuite" automation: 2 From 3110e6801eecd4111272961b302ae72a83345001 Mon Sep 17 00:00:00 2001 From: github-actions Date: Fri, 23 Jan 2026 14:14:19 -0800 Subject: [PATCH 4/4] reverting harvester build scripts --- .../README.md | 34 -------------- .../build-harvester-image.sh | 45 ------------------- .../files.conf | 14 ------ .../setup-harvester-dev-build.sh | 36 --------------- 4 files changed, 129 deletions(-) delete mode 100644 scripts/build_and_serve_harvester_images/README.md delete mode 100644 scripts/build_and_serve_harvester_images/build-harvester-image.sh delete mode 100644 scripts/build_and_serve_harvester_images/files.conf delete mode 100644 scripts/build_and_serve_harvester_images/setup-harvester-dev-build.sh diff --git a/scripts/build_and_serve_harvester_images/README.md b/scripts/build_and_serve_harvester_images/README.md deleted file mode 100644 index be72ad83c..000000000 --- a/scripts/build_and_serve_harvester_images/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# Build and Serve -- Harvester Development Builds - -## Prerequisites - -* a machine that: - * has at least 100GB space - * has nginx installed/accessible - -## Setup - -### Dev for Harvester Builds - -Everything should be included in the `setup-harvester-dev-build.sh` script. 
Simply copy it to your machine, then `chmod 777` the file, and run it. - -## Building Harvester - -Again, everything should be included in the script. The script expects a small disk, so it wipes the built items from the last run each time. -Copy over the file `build-harvester-image.sh`, `chmod 777` and run it. -If you want this to run this regularly, you can setup a cron for it via `crontab -e`. - -## Serving the ISOs - -First, ensure nginx is running. `sudo systemctl status nginx` - -We need to ensure nginx has permission on the file(s) we want to share. **Note** Not sure how necessary this is but you should run it anyways just in case. - -```/bin/bash -sudo usermod -aG webfiles www-data -sudo chgrp -R webfiles /home/ubuntu/harvester -``` - -Next, add `files.conf` to nginx. It should live in this dir `/etc/nginx/conf.d/`. Update ports here if you'd like in `files.conf`. -Then, test the changes `sudo nginx -t` should say the configuration is valid. -Finally, you should be able to visit your node's IP address:port you selected. You should see a single folder called `latest` that contains the ISO amongst other things that were built. diff --git a/scripts/build_and_serve_harvester_images/build-harvester-image.sh b/scripts/build_and_serve_harvester_images/build-harvester-image.sh deleted file mode 100644 index e1eac877a..000000000 --- a/scripts/build_and_serve_harvester_images/build-harvester-image.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -cd harvester - -git checkout v1.7 -git fetch - -before=$(git rev-parse HEAD) -output=$(git rebase origin/v1.7 2>&1) -after=$(git rev-parse HEAD) - -if [[ "$before" == "$after" ]]; then - exit 0 -fi - -echo "$output" - -docker system prune -f -docker volume prune -f - -cd dist/artifacts/ - -rm -rf old/ -rm -rf latest/ - -cd ../../ - -# can run make before-hand, but I don't think its necessary if I just want images. . -# make -make build-iso -sleep 10 - -cd dist/artifacts/ - -mkdir latest -mv harvester-* latest -cd latest - -for file in *master*; do - mv "$file" "${file//master/latest}" -done - -cd .. -mv "image-lists-amd64.tar.gz" latest/ - diff --git a/scripts/build_and_serve_harvester_images/files.conf b/scripts/build_and_serve_harvester_images/files.conf deleted file mode 100644 index b83278b52..000000000 --- a/scripts/build_and_serve_harvester_images/files.conf +++ /dev/null @@ -1,14 +0,0 @@ -server { - # this can be any port you want, but this was originally setup on a rke2 cluster using nodeport so I kept the format/range - listen 32309; - listen [::]:32309; - server_name _; - - root /home/ubuntu/harvester/dist/artifacts; - - location / { - autoindex on; - autoindex_exact_size off; - autoindex_localtime on; - } -} diff --git a/scripts/build_and_serve_harvester_images/setup-harvester-dev-build.sh b/scripts/build_and_serve_harvester_images/setup-harvester-dev-build.sh deleted file mode 100644 index 33673ca97..000000000 --- a/scripts/build_and_serve_harvester_images/setup-harvester-dev-build.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# Add Docker's official GPG key: -sudo apt update -sudo apt install ca-certificates curl make nginx -sudo install -m 0755 -d /etc/apt/keyrings -sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc -sudo chmod a+r /etc/apt/keyrings/docker.asc - -# Add the repository to Apt sources: -sudo tee /etc/apt/sources.list.d/docker.sources <> .bashrc - -git clone https://github.com/harvester/harvester.git - -cd harvester - -git checkout v1.7
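
For anyone wiring the new `pit.harvester.daily` tag into a pipeline, a minimal local invocation might look like the sketch below. It assumes a shepherd-style cattle config file pointed to by `CATTLE_TEST_CONFIG` (the environment variable read via `config.ConfigEnvironmentKey` in the new test's SetupSuite); the config path, timeout, and package selection are illustrative assumptions, not part of these patches.

```bash
# Minimal sketch of running the suites gated behind the new build tag.
# Assumptions (not part of these patches): CATTLE_TEST_CONFIG is the env var
# behind shepherd's config.ConfigEnvironmentKey, and the config path and
# timeout below are placeholders for your own environment.
export CATTLE_TEST_CONFIG="$HOME/cattle-config.yaml"

# Compile and run only tests tagged pit.harvester.daily, e.g. the new
# Harvester provisioning suite under validation/harvester/provisioning.
go test -v -timeout 4h -tags pit.harvester.daily \
  -run TestHarvesterProvisioningTestSuite \
  ./validation/harvester/provisioning/...
```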