diff --git a/.github/workflows/_build-builder.yml b/.github/workflows/_build-helper.yml similarity index 88% rename from .github/workflows/_build-builder.yml rename to .github/workflows/_build-helper.yml index 20bbcd5..6ff10df 100644 --- a/.github/workflows/_build-builder.yml +++ b/.github/workflows/_build-helper.yml @@ -1,4 +1,4 @@ -name: Build and Push Builder (Reusable) +name: Build and Push Helper (Reusable) on: workflow_call: @@ -46,13 +46,13 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Extract metadata (tags, labels) for Builder + - name: Extract metadata (tags, labels) for Helper id: meta uses: docker/metadata-action@v5 with: images: | - ghcr.io/${{ github.repository }}/builder - ${{ secrets.DOCKERHUB_USERNAME }}/builder + ghcr.io/${{ github.repository }}/helper + ${{ secrets.DOCKERHUB_USERNAME }}/helper tags: | # on git tag push, create a tag with the version number (e.g., v1.2.3) type=ref,event=tag @@ -61,11 +61,11 @@ jobs: # create a tag with the git sha for every push type=sha - - name: Build and push Builder Plugin + - name: Build and push Helper Plugin uses: docker/build-push-action@v5 with: - context: ./plugins/builder - file: ./plugins/builder/Dockerfile + context: ./plugins/helper + file: ./plugins/helper/Dockerfile push: ${{ inputs.push-images }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/cloudness-builder.yml b/.github/workflows/cloudness-helper.yml similarity index 78% rename from .github/workflows/cloudness-builder.yml rename to .github/workflows/cloudness-helper.yml index a7b41d9..8cc318c 100644 --- a/.github/workflows/cloudness-builder.yml +++ b/.github/workflows/cloudness-helper.yml @@ -1,4 +1,4 @@ -name: Plugin - Builder +name: Plugin - Helper on: # push: @@ -15,12 +15,12 @@ env: IMAGE_NAME: ${{ github.repository }} jobs: - build-cloudness-builder: - name: Builder Plugin + build-cloudness-helper: + name: Helper Plugin permissions: 
contents: read packages: write - uses: ./.github/workflows/_build-builder.yml + uses: ./.github/workflows/_build-helper.yml secrets: DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/cloudness-release.yml b/.github/workflows/cloudness-release.yml index c55b5e3..f01223a 100644 --- a/.github/workflows/cloudness-release.yml +++ b/.github/workflows/cloudness-release.yml @@ -24,12 +24,12 @@ jobs: DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - build-cloudness-builder: - name: Builder Plugin + build-cloudness-helper: + name: Helper Plugin permissions: contents: read packages: write - uses: ./.github/workflows/_build-builder.yml + uses: ./.github/workflows/_build-helper.yml with: enable-cache: true secrets: @@ -38,7 +38,7 @@ jobs: upload-scripts: name: Upload Scripts - needs: [build-cloudness-app, build-cloudness-builder] + needs: [build-cloudness-app, build-cloudness-helper] permissions: contents: read uses: ./.github/workflows/_upload-scripts.yml diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index b91ce3f..79ee880 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -18,16 +18,3 @@ jobs: secrets: DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - - validate-builder: - name: Validate Builder - permissions: - contents: read - packages: write - uses: ./.github/workflows/_build-builder.yml - with: - enable-cache: true - push-images: false - secrets: - DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} - DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/app/pipeline/convert/build_step.go b/app/pipeline/convert/build_step.go index 54eb5f0..7063d1b 100644 --- a/app/pipeline/convert/build_step.go +++ b/app/pipeline/convert/build_step.go @@ -5,7 +5,6 @@ import ( "strings" 
"github.com/cloudness-io/cloudness/app/pipeline" - "github.com/cloudness-io/cloudness/app/services/config" specSvc "github.com/cloudness-io/cloudness/app/services/spec" "github.com/cloudness-io/cloudness/types" "github.com/cloudness-io/cloudness/types/enum" @@ -22,136 +21,53 @@ func buildCommandNew( return nil } - switch specSvc.GetBuilder(spec) { - case enum.BuilderTypeDockerfile: - updateDockerFileBuildStep(in, pCtx, spec, step, buildVars) - case enum.BuilderTypeNixpacks: - updateNixpacksBuildStep(in, pCtx, spec, step, buildVars) - } - - addImageSecrets(in, pCtx, step) - addSecrets(pCtx, step, buildVars) - - return nil -} - -func updateDockerFileBuildStep( - in *pipeline.RunnerContextInput, - pCtx *pipeline.RunnerContext, - spec *types.ApplicationSpec, - step *pipeline.Step, - buildVars map[string]string, -) { - var cmd strings.Builder gitSource := spec.Build.Source.Git sourcePath := wsBuildVolumePath if gitSource.BasePath != "" && gitSource.BasePath != "/" { sourcePath = wsBuildVolumePath + gitSource.BasePath } - image, _, cacheImage := specSvc.GetImage(in.Application, in.Deployment, in.Config) - addBuildKitConfig(&cmd, in.Config) + image, _, cacheImage := specSvc.GetImage(in.Application, in.Deployment, in.Config) - // Construct the buildctl-daemonless.sh command - cmd.WriteString(fmt.Sprintf(`buildctl-daemonless.sh build \ - --frontend=dockerfile.v0 \ - --local context=%[1]s \ - --local dockerfile=%[1]s \ - --opt filename=%[2]s \ - --output type=image,name=%[3]s,push=true `, sourcePath, gitSource.Dockerfile, image)) + // Set common build environment variables + addSecret(pCtx, step, "CLOUDNESS_BUILD_SOURCE_PATH", sourcePath) + addSecret(pCtx, step, "CLOUDNESS_BUILD_IMAGE", image) + addSecret(pCtx, step, "CLOUDNESS_BUILD_CACHE_IMAGE", cacheImage) + addSecret(pCtx, step, "CLOUDNESS_IMAGE_REGISTRY", in.Config.PushRegistryURL) + addSecret(pCtx, step, "CLOUDNESS_IMAGE_MIRROR_REGISTRY", in.Config.MirrorRegistryURL) - if cacheImage != "" { - 
cmd.WriteString(fmt.Sprintf(`--export-cache type=registry,ref=%s,mode=max `, cacheImage)) - cmd.WriteString(fmt.Sprintf(`--import-cache type=registry,ref=%s,mode=max `, cacheImage)) + if in.Config.MirrorRegistryEnabled && in.Config.MirrorRegistryURL != "" { + addVariable(pCtx, step, "CLOUDNESS_MIRROR_ENABLED", "true") } - // Add BuildArgs + // Set build args as space-separated key=value pairs if len(buildVars) > 0 { + var args []string for k, v := range buildVars { - cmd.WriteString(fmt.Sprintf(`--opt build-arg:%s=%s `, k, v)) // + args = append(args, fmt.Sprintf("%s=%s", k, v)) } + addSecret(pCtx, step, "CLOUDNESS_BUILD_ARGS", strings.Join(args, " ")) } - step.AddScriptCmd(cmd.String()) -} - -func updateNixpacksBuildStep( - in *pipeline.RunnerContextInput, - pCtx *pipeline.RunnerContext, - spec *types.ApplicationSpec, - step *pipeline.Step, - buildVars map[string]string, -) { - gitSource := spec.Build.Source.Git - sourcePath := wsBuildVolumePath - if gitSource.BasePath != "" && gitSource.BasePath != "/" { - sourcePath = wsBuildVolumePath + gitSource.BasePath - } - - image, _, cacheImage := specSvc.GetImage(in.Application, in.Deployment, in.Config) - nixCommand := []string{fmt.Sprintf("nixpacks build %[1]s -o %[1]s", sourcePath)} - nixCommand = append(nixCommand, fmt.Sprintf("--name %s", image)) - if gitSource.BuildCommand != "" { - nixCommand = append(nixCommand, fmt.Sprintf(`--build-cmd "%s"`, gitSource.BuildCommand)) - } - if spec.Deploy.StartCommand != "" { - nixCommand = append(nixCommand, fmt.Sprintf(`--start-cmd "%s"`, spec.Deploy.StartCommand)) - } - - for key, value := range buildVars { - nixCommand = append(nixCommand, fmt.Sprintf(`--env %s="%s"`, key, value)) - } - nixCommand = append(nixCommand, "--verbose") - step.AddScriptCmd(strings.Join(nixCommand, ` `)) - - var cmd strings.Builder - //generate buildkit toml - addBuildKitConfig(&cmd, in.Config) - - // Construct the buildctl-daemonless.sh command - cmd.WriteString(fmt.Sprintf(`buildctl-daemonless.sh 
build \ - --frontend=dockerfile.v0 \ - --local context=%[1]s \ - --local dockerfile=%[1]s \ - --opt filename=/.nixpacks/Dockerfile \ - --output type=image,name=%[2]s,push=true `, sourcePath, image)) - - if cacheImage != "" { - cmd.WriteString(fmt.Sprintf(`--export-cache type=registry,ref=%s `, cacheImage)) - cmd.WriteString(fmt.Sprintf(`--import-cache type=registry,ref=%s,mode=max `, cacheImage)) + switch specSvc.GetBuilder(spec) { + case enum.BuilderTypeDockerfile: + addVariable(pCtx, step, "CLOUDNESS_BUILD_TYPE", "dockerfile") + addVariable(pCtx, step, "CLOUDNESS_BUILD_DOCKERFILE", gitSource.Dockerfile) + case enum.BuilderTypeNixpacks: + addVariable(pCtx, step, "CLOUDNESS_BUILD_TYPE", "nixpacks") + if gitSource.BuildCommand != "" { + addVariable(pCtx, step, "CLOUDNESS_BUILD_CMD", gitSource.BuildCommand) + } + if spec.Deploy.StartCommand != "" { + addVariable(pCtx, step, "CLOUDNESS_START_CMD", spec.Deploy.StartCommand) + } + } - step.AddScriptCmd(cmd.String()) - -} - -func addImageSecrets(in *pipeline.RunnerContextInput, pCtx *pipeline.RunnerContext, step *pipeline.Step) { - image, pullImage, cacheImage := specSvc.GetImage(in.Application, in.Deployment, in.Config) - addSecret(pCtx, step, "CLOUDNESS_BUILD_IMAGE", image) - addSecret(pCtx, step, "CLOUDNESS_BUILD_PULL_IMAGE", pullImage) - addSecret(pCtx, step, "CLOUDNESS_BUILD_CACHE_IMAGE", cacheImage) - addSecret(pCtx, step, "CLOUDNESS_IMAGE_REGISTRY", in.Config.PushRegistryURL) - addSecret(pCtx, step, "CLOUDNESS_IMAGE_MIRROR_REGISTRY", in.Config.MirrorRegistryURL) -} - -func addBuildKitConfig(cmd *strings.Builder, config *config.PipelineConfig) { - //generate buildkit toml - cmd.WriteString(`BUILDKITD_CONFIG_PATH="$HOME/.config/buildkit/buildkitd.toml"` + "\n") - cmd.WriteString(`mkdir -p "$(dirname "$BUILDKITD_CONFIG_PATH")"` + "\n") - cmd.WriteString(`> "$BUILDKITD_CONFIG_PATH"` + "\n") - cmd.WriteString(fmt.Sprintf(`MAIN_REGISTRY=$(echo %s | cut -d'/' -f1)`+"\n", config.PushRegistryURL)) - cmd.WriteString(`echo 
"[registry.\"$MAIN_REGISTRY\"]" >> "$BUILDKITD_CONFIG_PATH"` + "\n") - cmd.WriteString(`echo " http = true" >> "$BUILDKITD_CONFIG_PATH"` + "\n") - cmd.WriteString(`echo " insecure = true" >> "$BUILDKITD_CONFIG_PATH"` + "\n") + // Add image secrets for other steps + addSecrets(pCtx, step, buildVars) - if config.MirrorRegistryEnabled && config.MirrorRegistryURL != "" { - // Add the mirror configuration - cmd.WriteString(fmt.Sprintf(`MIRROR_REGISTRY=$(echo %s | cut -d'/' -f1)`+"\n", config.MirrorRegistryURL)) - cmd.WriteString(`echo "[registry.\"$MIRROR_REGISTRY\"]" >> "$BUILDKITD_CONFIG_PATH"` + "\n") - cmd.WriteString(`echo " http = true" >> "$BUILDKITD_CONFIG_PATH"` + "\n") - cmd.WriteString(`echo " insecure = true" >> "$BUILDKITD_CONFIG_PATH"` + "\n") + // Run the build script + step.AddScriptCmd(". /usr/local/lib/build-script.sh") - cmd.WriteString(`echo "[registry.\"docker.io\"]" >> "$BUILDKITD_CONFIG_PATH"` + "\n") - cmd.WriteString(fmt.Sprintf(`echo " mirrors = [\"%s\"]" >> "$BUILDKITD_CONFIG_PATH"`+"\n", config.MirrorRegistryURL)) - } + return nil } diff --git a/app/pipeline/convert/const.go b/app/pipeline/convert/const.go index 3072047..b2a29dd 100644 --- a/app/pipeline/convert/const.go +++ b/app/pipeline/convert/const.go @@ -7,9 +7,6 @@ import ( ) const ( - // images - busyBoxImage = "busybox:1.37.0" - // workspace volume wsBuildVolumePath = "/cloudness/workspace/build" @@ -25,8 +22,8 @@ var ( func getBuilderImage() string { ver := version.Version if ver.Major == 0 && ver.Minor == 0 && ver.Patch == 0 { - return "cloudnessio/builder:latest" + return "cloudnessio/helper:latest" } // Use semver String() which includes prerelease (e.g., "0.1.0-alpha.1") - return fmt.Sprintf("cloudnessio/builder:v%s", ver.String()) + return fmt.Sprintf("cloudnessio/helper:v%s", ver.String()) } diff --git a/app/pipeline/convert/convert.go b/app/pipeline/convert/convert.go index dafc05e..59ac9f4 100644 --- a/app/pipeline/convert/convert.go +++ b/app/pipeline/convert/convert.go @@ 
-13,6 +13,7 @@ func ToRunnerContext(in *pipeline.RunnerContextInput) (*pipeline.RunnerContext, Steps: []*pipeline.Step{}, InitSteps: []*pipeline.Step{}, Secrets: []*pipeline.Secret{}, + Variables: []*pipeline.Variable{}, } pCtx.RunnerName = getRunnerNamespace(pCtx) @@ -59,7 +60,7 @@ func ToRunnerContext(in *pipeline.RunnerContextInput) (*pipeline.RunnerContext, Envs: map[string]string{}, } - step.AddStripCmds("#!/bin/sh\n\n", "set -e") + step.AddStripCmds("#!/bin/sh\n\n", "set -e", ". /usr/local/lib/cloudness-utils.sh") if err := initCommand(step, in, pCtx, spec); err != nil { return nil, err diff --git a/app/pipeline/convert/deploy_step.go b/app/pipeline/convert/deploy_step.go index 15ee517..16031b7 100644 --- a/app/pipeline/convert/deploy_step.go +++ b/app/pipeline/convert/deploy_step.go @@ -12,7 +12,6 @@ import ( "github.com/cloudness-io/cloudness/types/enum" shlex "github.com/kballard/go-shellquote" - "github.com/rs/zerolog/log" ) func deployCommand( @@ -31,18 +30,28 @@ func deployCommand( return err } - script, common, volume, app, route, err := templates.GenerateKubeTemplates(tmplIn) + common, volume, app, route, err := templates.GenerateKubeTemplates(tmplIn) if err != nil { return err } - step.AddScriptCmd(fmt.Sprintf("cd %s", wsDeployVolumePath)) - step.AddScriptCmd(script) + // Add YAML files as ConfigFiles (will be mounted from ConfigMap) + addConfigFile(pCtx, "common.yaml", common) + addConfigFile(pCtx, "volume.yaml", volume) + addConfigFile(pCtx, "app.yaml", app) + addConfigFile(pCtx, "route.yaml", route) - //General secrets for log sanitization + // Mount ConfigFiles to deploy path + step.ConfigFileMounts = append(step.ConfigFileMounts, &pipeline.ConfigFileMount{ + Path: wsDeployVolumePath, + Keys: []string{"common.yaml", "volume.yaml", "app.yaml", "route.yaml"}, + }) + + // General secrets for log sanitization addSecret(pCtx, step, "CLOUDNESS_DEPLOY_APP_IDENTIFIER", name) addSecret(pCtx, step, "CLOUDNESS_DEPLOY_APP_NAMESPACE", namespace) - // flags + 
+ // Flags addSecret(pCtx, step, "CLOUDNESS_DEPLOY_FLAG_APP_TYPE", string(in.Application.Type)) if len(tmplIn.Volumes) > 0 { addSecret(pCtx, step, "CLOUDNESS_DEPLOY_FLAG_HAS_VOLUME", "1") @@ -50,18 +59,16 @@ func deployCommand( if tmplIn.ServiceDomain != nil { addSecret(pCtx, step, "CLOUDNESS_DEPLOY_FLAG_HAS_ROUTE", "1") } - // kube files - addSecret(pCtx, step, "CLOUDNESS_DEPLOY_YAML_COMMON", common) - addSecret(pCtx, step, "CLOUDNESS_DEPLOY_YAML_VOLUME", volume) - addSecret(pCtx, step, "CLOUDNESS_DEPLOY_YAML_APP", app) - addSecret(pCtx, step, "CLOUDNESS_DEPLOY_YAML_ROUTE", route) - // common + // Deploy path for script to find YAML files + addSecret(pCtx, step, "CLOUDNESS_DEPLOY_PATH", wsDeployVolumePath) + + // Common deployment info addSecret(pCtx, step, "CLOUDNESS_DEPLOY_TARGET_NAMESPACE", namespace) addSecret(pCtx, step, "CLOUDNESS_DEPLOY_TARGET_NAME", name) addSecret(pCtx, step, "CLOUDNESS_DEPLOY_TARGET_IMAGE", pullImage) - // unmount before update volumes ? + // Unmount before update volumes? needsRemount := "0" if in.ServerRestctions.UnmountBeforeResize { if needsVolumeRemount(in.Deployment, in.PreviousDeployment) { @@ -70,8 +77,9 @@ func deployCommand( } addSecret(pCtx, step, "CLOUDNESS_DEPLOY_FLAG_NEED_REMOUNT", needsRemount) - step.VolumeMounts = append(step.VolumeMounts, getDeployVolumeMount(pCtx)) - //TODO: check if deployment is enabled + // Run the Go-based deployer binary + step.AddScriptCmd("cloudness-deploy") + return nil } @@ -171,6 +179,17 @@ func needsVolumeRemount(currDeployment *types.Deployment, prevDeployment *types. 
} } - log.Error().Msg("Debug: return 4") return false } + +// addConfigFile adds a file to the ConfigMap for mounting +func addConfigFile(pCtx *pipeline.RunnerContext, filename string, content string) { + if content == "" { + return + } + pCtx.ConfigFiles = append(pCtx.ConfigFiles, &pipeline.ConfigFile{ + Key: filename, + Filename: filename, + Content: content, + }) +} diff --git a/app/pipeline/convert/init_step.go b/app/pipeline/convert/init_step.go index 2e42e87..243d9d6 100644 --- a/app/pipeline/convert/init_step.go +++ b/app/pipeline/convert/init_step.go @@ -1,8 +1,6 @@ package convert import ( - "fmt" - "github.com/cloudness-io/cloudness/app/pipeline" specSvc "github.com/cloudness-io/cloudness/app/services/spec" "github.com/cloudness-io/cloudness/types" @@ -19,22 +17,24 @@ func initCommand( } gitSource := spec.Build.Source.Git - step.AddScriptCmd(`echo "Git clone started"`) + + // Set environment variables for the init script + addVariable(pCtx, step, "CLOUDNESS_GIT_REPO_URL", gitSource.RepoURL) + addVariable(pCtx, step, "CLOUDNESS_GIT_BRANCH", gitSource.Branch) + addVariable(pCtx, step, "CLOUDNESS_GIT_COMMIT", gitSource.Commit) + addSecret(pCtx, step, "CLOUDNESS_BUILD_PATH", wsBuildVolumePath) + + // Set netrc credentials if provided if in.Netrc != nil { - step.AddScriptCmd(`echo "machine $GIT_MACHINE login $GIT_LOGIN password $GIT_PASSWORD" > ~/.netrc`) - step.AddScriptCmd(`chmod 600 ~/.netrc`) addSecret(pCtx, step, "GIT_LOGIN", in.Netrc.Login) addSecret(pCtx, step, "GIT_PASSWORD", in.Netrc.Password) addSecret(pCtx, step, "GIT_MACHINE", in.Netrc.Machine) } - step.AddScriptCmd(fmt.Sprintf(`git clone %s --branch %s %s;`, gitSource.RepoURL, gitSource.Branch, wsBuildVolumePath)) - if gitSource.Commit != "" { - step.AddScriptCmd(fmt.Sprintf("git -C %s config advice.detachedHead false", wsBuildVolumePath)) - step.AddScriptCmd(fmt.Sprintf("git -C %s checkout %s", wsBuildVolumePath, gitSource.Commit)) - } - step.AddScriptCmd(`echo "Git clone successful"`) - //volume 
mounts + // Run the init script + step.AddScriptCmd(". /usr/local/lib/init-script.sh") + + // Volume mounts step.VolumeMounts = append(step.VolumeMounts, getBuildVolumeMount(pCtx)) return nil diff --git a/app/pipeline/convert/secret.go b/app/pipeline/convert/secret.go deleted file mode 100644 index d472629..0000000 --- a/app/pipeline/convert/secret.go +++ /dev/null @@ -1,23 +0,0 @@ -package convert - -import ( - "github.com/cloudness-io/cloudness/app/pipeline" -) - -func addSecrets(ctx *pipeline.RunnerContext, step *pipeline.Step, vars map[string]string) { - for key, val := range vars { - addSecret(ctx, step, key, val) - } -} - -func addSecret(ctx *pipeline.RunnerContext, step *pipeline.Step, key string, value string) { - ctx.Secrets = append(ctx.Secrets, &pipeline.Secret{ - Name: key, - Data: value, - Mask: true, - }) - - step.Secrets = append(step.Secrets, &pipeline.SecretEnv{ - Key: key, - }) -} diff --git a/app/pipeline/convert/templates/static/1-common.yaml b/app/pipeline/convert/templates/static/1-common.yaml index ceb76aa..89e114f 100644 --- a/app/pipeline/convert/templates/static/1-common.yaml +++ b/app/pipeline/convert/templates/static/1-common.yaml @@ -2,6 +2,8 @@ apiVersion: v1 kind: Namespace metadata: name: {{ .Namespace }} + labels: + app.kubernetes.io/managed-by: cloudness --- apiVersion: v1 @@ -10,7 +12,10 @@ metadata: name: {{ .Identifier }}-sa namespace: {{ .Namespace }} labels: - identifier: {{ .Identifier }} + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} + app.kubernetes.io/component: serviceaccount + app.kubernetes.io/managed-by: cloudness --- apiVersion: rbac.authorization.k8s.io/v1 @@ -19,7 +24,10 @@ metadata: name: {{ .Identifier }}-role namespace: {{ .Namespace }} labels: - identifier: {{ .Identifier }} + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} + app.kubernetes.io/component: rbac + app.kubernetes.io/managed-by: cloudness rules: - apiGroups: [""] # 
Core API group resources: ["services"] @@ -32,7 +40,10 @@ metadata: name: {{ .Identifier }}-rolebinding namespace: {{ .Namespace }} labels: - identifier: {{ .Identifier }} + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} + app.kubernetes.io/component: rbac + app.kubernetes.io/managed-by: cloudness subjects: - kind: ServiceAccount name: {{ .Identifier }}-sa @@ -49,7 +60,10 @@ metadata: name: {{ .Identifier }} namespace: {{ .Namespace }} labels: - identifier: {{ .Identifier }} + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} + app.kubernetes.io/component: config + app.kubernetes.io/managed-by: cloudness data: {{- range $key, $value := .Variables }} {{ $key }}: {{ $value }} @@ -62,7 +76,10 @@ metadata: name: {{ .Identifier }} namespace: {{ .Namespace }} labels: - identifier: {{ .Identifier }} + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} + app.kubernetes.io/component: config + app.kubernetes.io/managed-by: cloudness type: Opaque data: {{- range $key, $value := .Secrets }} diff --git a/app/pipeline/convert/templates/static/2-pvc.yaml b/app/pipeline/convert/templates/static/2-pvc.yaml index a3dc8bd..f93dbad 100644 --- a/app/pipeline/convert/templates/static/2-pvc.yaml +++ b/app/pipeline/convert/templates/static/2-pvc.yaml @@ -6,7 +6,10 @@ metadata: name: {{ .VolumeName }} namespace: {{ $.Namespace }} labels: - identifier: {{ $.Identifier }} + app.kubernetes.io/name: {{ $.Identifier }} + app.kubernetes.io/instance: {{ $.Identifier }} + app.kubernetes.io/component: storage + app.kubernetes.io/managed-by: cloudness spec: accessModes: - ReadWriteOnce diff --git a/app/pipeline/convert/templates/static/3-app.yaml b/app/pipeline/convert/templates/static/3-app.yaml index 91afca5..2fabc7f 100644 --- a/app/pipeline/convert/templates/static/3-app.yaml +++ b/app/pipeline/convert/templates/static/3-app.yaml @@ -5,15 +5,22 @@ metadata: name: {{ 
.Identifier }} namespace: {{ .Namespace }} labels: - identifier: {{ .Identifier }} + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} + app.kubernetes.io/component: app + app.kubernetes.io/managed-by: cloudness spec: selector: matchLabels: - app: {{ .Identifier }} # Must match .spec.template.metadata.labels + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} template: metadata: labels: - app: {{ .Identifier }} # Must match .spec.selector.matchLabels + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} + app.kubernetes.io/component: app + app.kubernetes.io/managed-by: cloudness spec: securityContext: runAsUser: 1001 @@ -66,7 +73,10 @@ metadata: name: {{ .Identifier }}-hpa namespace: {{ .Namespace }} labels: - identifier: {{ .Identifier }} + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} + app.kubernetes.io/component: autoscaling + app.kubernetes.io/managed-by: cloudness spec: scaleTargetRef: apiVersion: apps/v1 @@ -96,15 +106,22 @@ metadata: name: {{ .Identifier }} namespace: {{ .Namespace }} labels: - identifier: {{ .Identifier }} + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} + app.kubernetes.io/component: app + app.kubernetes.io/managed-by: cloudness spec: selector: matchLabels: - app: {{ .Identifier }} # Must match .spec.template.metadata.labels + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} template: metadata: labels: - app: {{ .Identifier }} # Must match .spec.selector.matchLabels + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} + app.kubernetes.io/component: app + app.kubernetes.io/managed-by: cloudness spec: securityContext: runAsUser: 1001 diff --git a/app/pipeline/convert/templates/static/4-httproute.yaml b/app/pipeline/convert/templates/static/4-httproute.yaml 
index 481de50..f5e812a 100644 --- a/app/pipeline/convert/templates/static/4-httproute.yaml +++ b/app/pipeline/convert/templates/static/4-httproute.yaml @@ -6,11 +6,14 @@ metadata: name: {{ .Identifier }} namespace: {{ .Namespace }} labels: - app: {{ .Identifier }} - identifier: {{ .Identifier }} + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} + app.kubernetes.io/component: service + app.kubernetes.io/managed-by: cloudness spec: selector: - app: {{ .Identifier }} + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} ports: {{- range $index, $value := .ServicePorts }} - name: {{ $value }}-port @@ -28,7 +31,10 @@ metadata: name: {{ .Identifier }}-http namespace: {{ .Namespace }} labels: - identifier: {{ .Identifier }} + app.kubernetes.io/name: {{ .Identifier }} + app.kubernetes.io/instance: {{ .Identifier }} + app.kubernetes.io/component: ingress + app.kubernetes.io/managed-by: cloudness spec: parentRefs: {{ if .ServiceDomain.Websecure }} diff --git a/app/pipeline/convert/templates/static/kube-script.sh b/app/pipeline/convert/templates/static/kube-script.sh deleted file mode 100755 index 97ed019..0000000 --- a/app/pipeline/convert/templates/static/kube-script.sh +++ /dev/null @@ -1,220 +0,0 @@ -: "${CLOUDNESS_DEPLOY_APP_IDENTIFIER:=}" -: "${CLOUDNESS_DEPLOY_APP_NAMESPACE:=}" - -# Deployment flags -: "${CLOUDNESS_DEPLOY_FLAG_APP_TYPE:=}" -: "${CLOUDNESS_DEPLOY_FLAG_HAS_VOLUME:=0}" -: "${CLOUDNESS_DEPLOY_FLAG_NEED_REMOUNT:=0}" -: "${CLOUDNESS_DEPLOY_FLAG_HAS_ROUTE:=0}" - -# Deployment yaml files -: "${CLOUDNESS_DEPLOY_YAML_COMMON:=}" -: "${CLOUDNESS_DEPLOY_YAML_VOLUME:=}" -: "${CLOUDNESS_DEPLOY_YAML_APP:=}" -: "${CLOUDNESS_DEPLOY_YAML_ROUTE:=}" - -# Define color variables -RED='\033[1;31m' # Bold Red -YELLOW='\033[1;33m' # Bold Yellow -GREEN='\033[1;32m' # Bold Green -RESET='\033[0m' # Resets color -CHECK_MARK='\u2714' # Unicode for Heavy Check Mark (✔) - -# Helper function for logging 
-error() { echo -e "${RED}[ERROR]${RESET} $*"; } -warn() { echo -e "${YELLOW}[WARN]${RESET} $*"; } -info() { echo -e "$*"; } -success() { echo -e "${GREEN}[SUCCESS]${RESET} $*"; } -success_info() { echo -e "$* ${GREEN}✔${RESET}"; } - -apply_kube_config_from_string() { - local KUBE_YAML_STRING="$1" - local ERROR_MESSAGE="" - - # Check if the YAML string is empty - if [ -z "$KUBE_YAML_STRING" ]; then - return 0 - fi - - # echo "Attempting to apply Kubernetes configuration..." - # echo -e "$KUBE_YAML_STRING" | sed 's/\t/ /g' - - # Apply the YAML using kubectl, capturing stderr into ERROR_MESSAGE - # And redirecting stdout to /dev/null to suppress success messages - if ! ERROR_MESSAGE=$(echo -e "$KUBE_YAML_STRING" | sed 's/\t/ /g' | kubectl apply -f - 2>&1 >/dev/null); then - error "Error applying Kubernetes configuration:" - error "$ERROR_MESSAGE" - return 1 # Indicate failure - else - return 0 # Indicate success - fi -} - -rollout_status() { - local ROLLOUT_ERROR="" - - if [ "$CLOUDNESS_DEPLOY_FLAG_APP_TYPE" = "Stateless" ]; then - if ! ROLLOUT_ERROR=$(kubectl rollout status deployment/"$CLOUDNESS_DEPLOY_APP_IDENTIFIER" -n "$CLOUDNESS_DEPLOY_APP_NAMESPACE" --timeout=20s >&1 >/dev/null); then - error "$ROLLOUT_ERROR" - error "Error rolling out deployment, reverting..." - kubectl rollout undo deployment/"$CLOUDNESS_DEPLOY_APP_IDENTIFIER" -n "$CLOUDNESS_DEPLOY_APP_NAMESPACE" - return 1 - fi - else - if ! ROLLOUT_ERROR=$(kubectl rollout status statefulset/"$CLOUDNESS_DEPLOY_APP_IDENTIFIER" -n "$CLOUDNESS_DEPLOY_APP_NAMESPACE" --timeout=2m >&1 >/dev/null); then - error "$ROLLOUT_ERROR" - error "Error rolling out deployment, reverting..." - kubectl rollout undo statefulset/"$CLOUDNESS_DEPLOY_APP_IDENTIFIER" -n "$CLOUDNESS_DEPLOY_APP_NAMESPACE" - return 1 - fi - fi -} - -cleanup() { - info "Running clean up..." - local CLEANUP_ERROR="" - - if [ "$CLOUDNESS_DEPLOY_FLAG_APP_TYPE" = "Stateless" ]; then - if ! 
CLEANUP_ERROR=$(kubectl delete statefulset/"$CLOUDNESS_DEPLOY_APP_IDENTIFIER" -n "$CLOUDNESS_DEPLOY_APP_NAMESPACE" --ignore-not-found=true 2>&1 >/dev/null); then - error "$CLEANUP_ERROR" - warn "Error cleaning up deployment, Skipping..." - fi - else - if ! CLEANUP_ERROR=$(kubectl delete deployment/"$CLOUDNESS_DEPLOY_APP_IDENTIFIER" -n "$CLOUDNESS_DEPLOY_APP_NAMESPACE" --ignore-not-found=true 2>&1 >/dev/null); then - error "$CLEANUP_ERROR" - warn "Error cleaning up deployment, Skipping..." - fi - fi -} - -wait_for_pvc_resize() { - PVC_NAME="$1" - NEW_SIZE="$2" - NAMESPACE="$CLOUDNESS_DEPLOY_APP_NAMESPACE" - - TIMEOUT_SECONDS="${TIMEOUT_SECONDS:-300}" # 5 minutes - SLEEP_SECONDS="${SLEEP_SECONDS:-5}" - - local current_time=$(date +%s) - local deadline=$(expr "$current_time" + "$TIMEOUT_SECONDS") - - check_resize_pending() { - kubectl get pvc "$PVC_NAME" -n "$NAMESPACE" -o jsonpath='{.status.conditions[?(@.type=="FileSystemResizePending")].status}' 2>/dev/null - } - - # Function to parse a storage string (e.g., "11Gi") and convert to GiB - parse_size_to_gib() { - local size_str=$1 - local value=$(echo "$size_str" | sed 's/Gi//') - # Use 'bc' for floating-point arithmetic if necessary - echo "$value" - } - - check_pvc_status() { - kubectl get pvc "$PVC_NAME" -n "$NAMESPACE" -o jsonpath='{.status.phase}' 2>/dev/null - } - - get_pvc_event() { - kubectl get events -n "$NAMESPACE" --field-selector involvedObject.kind=PersistentVolumeClaim,involvedObject.name="$PVC_NAME" --sort-by=.lastTimestamp -o jsonpath='{.items[-1:].reason}' 2>/dev/null - } - - while true; do - PVC_STATUS=$(check_pvc_status) - # Handle WaitForFirstConsumer specifically - if [ "$PVC_STATUS" == "Pending" ]; then - PVC_EVENT=$(get_pvc_event) - if [ "$PVC_EVENT" == "WaitForFirstConsumer" ]; then - return 0 - fi - fi - - CURRENT_SIZE=$(kubectl get pvc $PVC_NAME -n $NAMESPACE -o jsonpath='{.status.capacity.storage}') - if [ $(parse_size_to_gib "$CURRENT_SIZE") -ge $(parse_size_to_gib "$NEW_SIZE") ]; then - 
return 0 # Exit the function with success status - fi - - # Check for FileSystemResizePending condition - if [ "$(check_resize_pending)" == "True" ]; then - info "Volume has been resized. Remounting application to finalize resize." - return 0 - fi - - # Timeout - current_time=$(date +%s) - if [ "$current_time" -ge "$deadline" ]; then - info "⏱️ Timed out after ${TIMEOUT_SECONDS}s waiting for PVC '$PVC_NAME' to reach $NEW_SIZE." - return 1 - fi - - echo "Waiting for Volume..." - sleep "$SLEEP_SECONDS" - done -} - -################################################ MAIN ################################# - -# Applying common artifacts namespace, service account, role, rolebinding, configmap, secrets... -if apply_kube_config_from_string "$CLOUDNESS_DEPLOY_YAML_COMMON"; then - success_info "Setting up prequestic artifacts." -else - error "Error setting up prequestic artifacts. Exiting..." - return 1 -fi - -# Apply volume/pvc configuration -if [ "$CLOUDNESS_DEPLOY_FLAG_HAS_VOLUME" -eq 1 ]; then - if [ "$CLOUDNESS_DEPLOY_FLAG_NEED_REMOUNT" -eq 1 ]; then #if the volume is scaled up and needs remount for changes to apply - #delete the statefulset before scaling up the volume - if !(kubectl delete statefulset/"$CLOUDNESS_DEPLOY_APP_IDENTIFIER" -n "$CLOUDNESS_DEPLOY_APP_NAMESPACE" --ignore-not-found=true 2>&1 >/dev/null); then - error "Error cleaning up deployment, Skipping..." - return 1 - fi - fi - if apply_kube_config_from_string "$CLOUDNESS_DEPLOY_YAML_VOLUME"; then - # Iterate through each PVC found - PVC_DATA=$(echo -e "$CLOUDNESS_DEPLOY_YAML_VOLUME" | sed 's/\t/ /g' | yq -r 'select(.kind == "PersistentVolumeClaim") | .metadata.name + " " + .spec.resources.requests.storage' -) - echo "$PVC_DATA" | while read -r PVC_NAME NEW_SIZE; do - PVC_NAME=$(echo "$PVC_NAME" | xargs) - NEW_SIZE=$(echo "$NEW_SIZE" | xargs) - if [ -z "$PVC_NAME" ] || [ -z "$NEW_SIZE" ]; then #discard ---- and empty lines - continue - fi - - if ! 
wait_for_pvc_resize "$PVC_NAME" "$NEW_SIZE"; then - error "Failed to resize or confirm PVC '$PVC_NAME'. Check logs above for details." - return 1 - fi - done - - success_info "Volume provisioned." - else - "Error setting up volume. Exiting..." - return 1 - fi - -fi - -if ! apply_kube_config_from_string "$CLOUDNESS_DEPLOY_YAML_APP"; then - error "Error deploying application. Exiting..." - return 1 -fi - -if rollout_status; then - success_info "Application deployment." -else - #logs are handles in rollout_status method - return 1 -fi - -if [ "$CLOUDNESS_DEPLOY_FLAG_HAS_ROUTE" -eq 1 ]; then - if apply_kube_config_from_string "$CLOUDNESS_DEPLOY_YAML_ROUTE"; then - success_info "HTTP routes configured." - else - error "Error setting up http routes. Exiting..." - return 1 - fi -fi - -cleanup - -success "Deployment completed successfully." diff --git a/app/pipeline/convert/templates/templates.go b/app/pipeline/convert/templates/templates.go index 7485dee..54a32de 100644 --- a/app/pipeline/convert/templates/templates.go +++ b/app/pipeline/convert/templates/templates.go @@ -3,24 +3,27 @@ package templates import ( "bytes" "embed" - "strings" "text/template" ) //go:embed static/* var templateFs embed.FS -var statelessTmpl *template.Template -var statelessScripts *template.Template -var statefulTmpl *template.Template -var statefulScripts *template.Template +var ( + statelessTmpl *template.Template + statelessScripts *template.Template + statefulTmpl *template.Template + statefulScripts *template.Template +) // latest -var kubeCommon *template.Template -var kubePVC *template.Template -var kubeApp *template.Template -var kubeHttproute *template.Template -var kubeScripts string +var ( + kubeCommon *template.Template + kubePVC *template.Template + kubeApp *template.Template + kubeHttproute *template.Template + kubeScripts string +) func init() { statelessTmpl = getTemplate("stateless.yaml") @@ -32,7 +35,6 @@ func init() { kubePVC = getTemplate("2-pvc.yaml") kubeApp = 
getTemplate("3-app.yaml") kubeHttproute = getTemplate("4-httproute.yaml") - kubeScripts = getFileContent("kube-script.sh") } func getFileContent(fileName string) string { @@ -90,8 +92,7 @@ type ( } ) -func GenerateKubeTemplates(input *TemplateIn) (script string, common string, pvc string, app string, route string, err error) { - script = kubeScripts +func GenerateKubeTemplates(input *TemplateIn) (common string, pvc string, app string, route string, err error) { common, err = renderTemplate(kubeCommon, input) if err != nil { return @@ -141,10 +142,7 @@ func GenerateKubeStatefulTemplate(input *TemplateIn) (string, error) { } func formatTemplate(buf bytes.Buffer) string { - tmplStr := buf.String() - tmplStr = strings.ReplaceAll(tmplStr, "\n", "\\n") - tmplStr = strings.ReplaceAll(tmplStr, " ", "\\t") - return tmplStr + return buf.String() } func GenerateKubeStatelessScript(input *DeploymentIn) (string, error) { diff --git a/app/pipeline/convert/utils.go b/app/pipeline/convert/utils.go index 6b19149..2fbde9a 100644 --- a/app/pipeline/convert/utils.go +++ b/app/pipeline/convert/utils.go @@ -30,14 +30,6 @@ func getBuildVolumeMount(in *pipeline.RunnerContext) *pipeline.VolumeMount { } } -func getDeployVolumeMount(in *pipeline.RunnerContext) *pipeline.VolumeMount { - return &pipeline.VolumeMount{ - ID: getDeploymentWorkspaceVolumeId(in), - Path: wsDeployVolumePath, - Readonly: false, - } -} - // var replacer func replaceEnvVars(input string, vars map[string]*types.Variable) string { var builder strings.Builder @@ -61,3 +53,32 @@ func replaceEnvVars(input string, vars map[string]*types.Variable) string { return builder.String() } + +func addSecrets(ctx *pipeline.RunnerContext, step *pipeline.Step, vars map[string]string) { + for key, val := range vars { + addSecret(ctx, step, key, val) + } +} + +func addSecret(ctx *pipeline.RunnerContext, step *pipeline.Step, key string, value string) { + ctx.Secrets = append(ctx.Secrets, &pipeline.Secret{ + Name: key, + Data: value, + 
Mask: true, + }) + + step.Secrets = append(step.Secrets, &pipeline.SecretEnv{ + Key: key, + }) +} + +func addVariable(ctx *pipeline.RunnerContext, step *pipeline.Step, key string, value string) { + ctx.Variables = append(ctx.Variables, &pipeline.Variable{ + Name: key, + Value: value, + }) + + step.Variables = append(step.Variables, &pipeline.VariableEnv{ + Key: key, + }) +} diff --git a/app/pipeline/runner/engine/kubernetes/kubernetes.go b/app/pipeline/runner/engine/kubernetes/kubernetes.go index 88495c8..f29bed8 100644 --- a/app/pipeline/runner/engine/kubernetes/kubernetes.go +++ b/app/pipeline/runner/engine/kubernetes/kubernetes.go @@ -107,6 +107,11 @@ func (e *kube) Setup(rCtx context.Context, pCtx *pipeline.RunnerContext) error { return err } + _, err = e.client.CoreV1().ConfigMaps(e.nameSpace).Create(rCtx, toConfigMap(pCtx), metav1.CreateOptions{}) + if err != nil { + return err + } + _, err = e.client.CoreV1().Pods(e.nameSpace).Create(rCtx, toPod(e.nameSpace, pCtx), metav1.CreateOptions{}) if err != nil { return err @@ -301,6 +306,12 @@ func (e *kube) Destroy(ctx context.Context, pCtx *pipeline.RunnerContext) error return err } + // delete configmap + err = e.client.CoreV1().ConfigMaps(e.nameSpace).Delete(ctx, pCtx.RunnerName, metav1.DeleteOptions{}) + if err != nil { + return err + } + return e.client.CoreV1().Pods(e.nameSpace).Delete(ctx, pCtx.RunnerName, metav1.DeleteOptions{}) } diff --git a/app/pipeline/runner/engine/kubernetes/tokube.go b/app/pipeline/runner/engine/kubernetes/tokube.go index 7c904b2..d5dcb55 100644 --- a/app/pipeline/runner/engine/kubernetes/tokube.go +++ b/app/pipeline/runner/engine/kubernetes/tokube.go @@ -52,6 +52,25 @@ func toSecret(p *pipeline.RunnerContext) *v1.Secret { } } +func toConfigMap(p *pipeline.RunnerContext) *v1.ConfigMap { + stringData := make(map[string]string) + for _, s := range p.Variables { + stringData[s.Name] = s.Value + } + + // Add ConfigFiles to ConfigMap + for _, cf := range p.ConfigFiles { + 
stringData[cf.Key] = cf.Content + } + + return &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: p.RunnerName, + }, + Data: stringData, + } +} + func toPod(nameSpace string, p *pipeline.RunnerContext) *v1.Pod { resource := toResources(p) return &v1.Pod{ @@ -157,6 +176,20 @@ func toEnv(s *pipeline.Step, p *pipeline.RunnerContext) []v1.EnvVar { }, }) } + + for _, varEnv := range s.Variables { + envs = append(envs, v1.EnvVar{ + Name: varEnv.Key, + ValueFrom: &v1.EnvVarSource{ + ConfigMapKeyRef: &v1.ConfigMapKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: p.RunnerName, + }, + Key: varEnv.Key, + }, + }, + }) + } return envs } @@ -174,6 +207,21 @@ func toVolume(p *pipeline.RunnerContext) []v1.Volume { }, }) } + + // Add ConfigMap volume for file mounts if there are ConfigFiles + if len(p.ConfigFiles) > 0 { + volumes = append(volumes, v1.Volume{ + Name: "configfiles", + VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: p.RunnerName, + }, + }, + }, + }) + } + return volumes } @@ -186,6 +234,16 @@ func toVolumeMounts(s *pipeline.Step) []v1.VolumeMount { ReadOnly: v.Readonly, }) } + + // Add ConfigFileMount volumes (mounts ConfigMap files to specified path) + for _, cfm := range s.ConfigFileMounts { + volumeMounts = append(volumeMounts, v1.VolumeMount{ + Name: "configfiles", + MountPath: cfm.Path, + ReadOnly: true, + }) + } + return volumeMounts } diff --git a/app/pipeline/types.go b/app/pipeline/types.go index 36afdd7..dd9d2b3 100644 --- a/app/pipeline/types.go +++ b/app/pipeline/types.go @@ -37,21 +37,25 @@ type ( Steps []*Step InitSteps []*Step Secrets []*Secret + Variables []*Variable + ConfigFiles []*ConfigFile // Files to add to ConfigMap ResourcesLimit *ResourcesLimit } Step struct { - Name string - Image string - Command []string - Args []string - ScriptCommands []string - WorkingDir string - Envs map[string]string - VolumeMounts []*VolumeMount - Secrets 
[]*SecretEnv - Privileged bool - RestartPolicy RestartPolicy + Name string + Image string + Command []string + Args []string + ScriptCommands []string + WorkingDir string + Envs map[string]string + VolumeMounts []*VolumeMount + Secrets []*SecretEnv + Variables []*VariableEnv + ConfigFileMounts []*ConfigFileMount // ConfigMap file mounts + Privileged bool + RestartPolicy RestartPolicy //housekeeping Liveness *Liveness @@ -63,10 +67,32 @@ type ( Mask bool } + Variable struct { + Name string + Value string + } + SecretEnv struct { Key string } + VariableEnv struct { + Key string + } + + // ConfigFile represents a file to be mounted from ConfigMap + ConfigFile struct { + Key string // Key in ConfigMap + Filename string // Filename when mounted + Content string // File content + } + + // ConfigFileMount represents a mounted ConfigMap as files + ConfigFileMount struct { + Path string // Mount path directory + Keys []string // Keys to mount as files + } + Volume struct { ID string Size int64 diff --git a/app/web/public/assets/styles.css b/app/web/public/assets/styles.css index f2c4a93..33eea1f 100644 --- a/app/web/public/assets/styles.css +++ b/app/web/public/assets/styles.css @@ -260,6 +260,9 @@ .pointer-events-none { pointer-events: none; } + .collapse { + visibility: collapse; + } .invisible { visibility: hidden; } @@ -340,6 +343,9 @@ .left-\[-18px\] { left: -18px; } + .isolate { + isolation: isolate; + } .z-10 { z-index: 10; } @@ -572,6 +578,9 @@ .hidden\! 
{ display: none !important; } + .inline { + display: inline; + } .inline-block { display: inline-block; } diff --git a/plugins/builder/Dockerfile b/plugins/builder/Dockerfile deleted file mode 100644 index db238dd..0000000 --- a/plugins/builder/Dockerfile +++ /dev/null @@ -1,29 +0,0 @@ -# Versions -# https://hub.docker.com/_/alpine -ARG BASE_IMAGE=moby/buildkit:rootless -# https://github.com/railwayapp/nixpacks/releases -ARG NIXPACKS_VERSION=1.33.0 -# https://dl.k8s.io/release/stable.txt -ARG KUBECTL_VERSION=latest - -ARG TARGETPLATFORM -ARG NIXPACKS_VERSION -ARG KUBECTL_VERSION - -FROM bitnami/kubectl:${KUBECTL_VERSION} AS kubectl -FROM ${BASE_IMAGE} AS base - - -USER root -RUN apk update && apk add --no-cache \ - bash curl git git-lfs openssh-client tar tini musl ca-certificates openssh-client yq \ - && rm -rf /var/cache/apk/* - -RUN curl -sSL https://nixpacks.com/install.sh | bash - -COPY --from=kubectl /opt/bitnami/kubectl/bin/kubectl /usr/local/bin/kubectl -RUN chmod +x /usr/local/bin/kubectl - -ENV PATH="/usr/local/bin:${PATH}" -USER user -ENTRYPOINT ["/sbin/tini", "--"] \ No newline at end of file diff --git a/plugins/builder/readme.md b/plugins/builder/readme.md deleted file mode 100644 index 9c0069e..0000000 --- a/plugins/builder/readme.md +++ /dev/null @@ -1,3 +0,0 @@ -to build builder - -sudo docker build . 
-t cloudnessio/builder:51 --build-arg TARGETPLATFORM="'linux/amd64'" diff --git a/plugins/deployer/main.go b/plugins/deployer/main.go deleted file mode 100644 index c5b00dd..0000000 --- a/plugins/deployer/main.go +++ /dev/null @@ -1,9 +0,0 @@ -package main - -import ( - "fmt" -) - -func main() { - fmt.Println("main") -} diff --git a/plugins/helper/Dockerfile b/plugins/helper/Dockerfile new file mode 100644 index 0000000..2fda868 --- /dev/null +++ b/plugins/helper/Dockerfile @@ -0,0 +1,50 @@ +# Versions +# https://hub.docker.com/_/alpine +ARG BASE_IMAGE=moby/buildkit:rootless +# https://github.com/railwayapp/nixpacks/releases +ARG NIXPACKS_VERSION=1.33.0 +# https://dl.k8s.io/release/stable.txt +ARG KUBECTL_VERSION=latest + +ARG TARGETPLATFORM +ARG NIXPACKS_VERSION +ARG KUBECTL_VERSION + +# Build the deployer binary (has its own go.mod) +FROM golang:1.25-alpine AS deployer-builder +WORKDIR /build +COPY ./deployer/go.mod ./deployer/go.sum ./ +RUN go mod download +COPY ./deployer/*.go ./ +RUN CGO_ENABLED=0 go build -ldflags="-s -w" -o ./deployer . 
+ +FROM bitnami/kubectl:${KUBECTL_VERSION} AS kubectl +FROM ${BASE_IMAGE} AS base + + +USER root +RUN apk update && apk add --no-cache \ + bash curl git git-lfs openssh-client tar tini musl ca-certificates openssh-client yq \ + && rm -rf /var/cache/apk/* + +RUN curl -sSL https://nixpacks.com/install.sh | bash + +COPY --from=kubectl /opt/bitnami/kubectl/bin/kubectl /usr/local/bin/kubectl +RUN chmod +x /usr/local/bin/kubectl + +# Add deployer binary +COPY --from=deployer-builder /build/deployer /usr/local/bin/cloudness-deploy +RUN chmod +x /usr/local/bin/cloudness-deploy + +# Add shared utilities and scripts +COPY scripts/cloudness-utils.sh /usr/local/lib/cloudness-utils.sh +COPY scripts/init-script.sh /usr/local/lib/init-script.sh +COPY scripts/build-script.sh /usr/local/lib/build-script.sh +COPY scripts/deploy-script.sh /usr/local/lib/deploy-script.sh +RUN chmod +x /usr/local/lib/cloudness-utils.sh /usr/local/lib/init-script.sh /usr/local/lib/build-script.sh /usr/local/lib/deploy-script.sh + +# Auto-source utilities for all sh scripts +ENV ENV="/usr/local/lib/cloudness-utils.sh" +ENV PATH="/usr/local/bin:${PATH}" +USER user +ENTRYPOINT ["/sbin/tini", "--"] \ No newline at end of file diff --git a/plugins/helper/deployer/config.go b/plugins/helper/deployer/config.go new file mode 100644 index 0000000..5eafa3f --- /dev/null +++ b/plugins/helper/deployer/config.go @@ -0,0 +1,118 @@ +package main + +import ( + "fmt" + "os" + "strconv" +) + +// Config holds all deployment configuration from environment variables +type Config struct { + // Application identifiers + AppIdentifier string + AppNamespace string + AppType AppType // "Stateless" or "Stateful" + + // Feature flags + HasVolume bool + HasRoute bool + NeedRemount bool + + // YAML file paths + DeployPath string + CommonYAMLPath string + VolumeYAMLPath string + AppYAMLPath string + RouteYAMLPath string + + // Timeouts + RolloutTimeoutStateless int // seconds + RolloutTimeoutStateful int // seconds + PVCResizeTimeout 
int // seconds + PVCResizePollInterval int // seconds + + // Options + Verbose bool +} + +type AppType string + +const ( + AppTypeStateless AppType = "Stateless" + AppTypeStateful AppType = "Stateful" +) + +// LoadConfigFromEnv loads configuration from environment variables +func LoadConfigFromEnv() (*Config, error) { + cfg := &Config{ + AppIdentifier: os.Getenv("CLOUDNESS_DEPLOY_APP_IDENTIFIER"), + AppNamespace: os.Getenv("CLOUDNESS_DEPLOY_APP_NAMESPACE"), + AppType: AppType(os.Getenv("CLOUDNESS_DEPLOY_FLAG_APP_TYPE")), + DeployPath: os.Getenv("CLOUDNESS_DEPLOY_PATH"), + HasVolume: os.Getenv("CLOUDNESS_DEPLOY_FLAG_HAS_VOLUME") == "1", + HasRoute: os.Getenv("CLOUDNESS_DEPLOY_FLAG_HAS_ROUTE") == "1", + NeedRemount: os.Getenv("CLOUDNESS_DEPLOY_FLAG_NEED_REMOUNT") == "1", + Verbose: os.Getenv("VERBOSE") == "true", + + // Defaults + RolloutTimeoutStateless: getEnvInt("ROLLOUT_TIMEOUT_STATELESS", 60), + RolloutTimeoutStateful: getEnvInt("ROLLOUT_TIMEOUT_STATEFUL", 120), + PVCResizeTimeout: getEnvInt("PVC_RESIZE_TIMEOUT", 300), + PVCResizePollInterval: getEnvInt("PVC_RESIZE_POLL_INTERVAL", 5), + } + + // Validate required fields + if cfg.AppIdentifier == "" { + return nil, fmt.Errorf("CLOUDNESS_DEPLOY_APP_IDENTIFIER is required") + } + if cfg.AppNamespace == "" { + return nil, fmt.Errorf("CLOUDNESS_DEPLOY_APP_NAMESPACE is required") + } + if cfg.AppType != AppTypeStateless && cfg.AppType != AppTypeStateful { + return nil, fmt.Errorf("CLOUDNESS_DEPLOY_FLAG_APP_TYPE must be 'Stateless' or 'Stateful', got '%s'", cfg.AppType) + } + if cfg.DeployPath == "" { + return nil, fmt.Errorf("CLOUDNESS_DEPLOY_PATH is required") + } + + // Set YAML file paths + cfg.CommonYAMLPath = cfg.DeployPath + "/common.yaml" + cfg.VolumeYAMLPath = cfg.DeployPath + "/volume.yaml" + cfg.AppYAMLPath = cfg.DeployPath + "/app.yaml" + cfg.RouteYAMLPath = cfg.DeployPath + "/route.yaml" + + return cfg, nil +} + +func getEnvInt(key string, defaultVal int) int { + if v := os.Getenv(key); v != "" { + if i, 
err := strconv.Atoi(v); err == nil { + return i + } + } + return defaultVal +} + +// ResourceType returns the Kubernetes resource type for this app +func (c *Config) ResourceType() string { + if c.AppType == AppTypeStateless { + return "Deployment" + } + return "StatefulSet" +} + +// OppositeResourceType returns the opposite resource type (for cleanup) +func (c *Config) OppositeResourceType() string { + if c.AppType == AppTypeStateless { + return "StatefulSet" + } + return "Deployment" +} + +// RolloutTimeout returns the appropriate timeout based on app type +func (c *Config) RolloutTimeout() int { + if c.AppType == AppTypeStateless { + return c.RolloutTimeoutStateless + } + return c.RolloutTimeoutStateful +} diff --git a/plugins/helper/deployer/deployer.go b/plugins/helper/deployer/deployer.go new file mode 100644 index 0000000..db918b6 --- /dev/null +++ b/plugins/helper/deployer/deployer.go @@ -0,0 +1,81 @@ +package main + +import ( + "context" + "fmt" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +// Deployer handles Kubernetes deployments using hybrid approach: +// - kubectl for applying manifests (battle-tested) +// - client-go for watching/monitoring (efficient) +type Deployer struct { + config *Config + Operator Operator + log *Logger +} + +// NewDeployer creates a new deployer instance +func NewDeployer(cfg *Config) (*Deployer, error) { + // Get in-cluster config + restConfig, err := rest.InClusterConfig() + if err != nil { + return nil, fmt.Errorf("failed to get in-cluster config: %w", err) + } + + // Create clientset for watching + clientset, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, fmt.Errorf("failed to create kubernetes client: %w", err) + } + + logger := NewLogger(cfg.Verbose) + var operator Operator + switch cfg.AppType { + case AppTypeStateless: + operator = NewStatelessOperator(cfg, clientset, logger) + case AppTypeStateful: + operator = NewStatefulOperator(cfg, clientset, logger) + } + + return 
&Deployer{ + config: cfg, + Operator: operator, + log: logger, + }, nil +} + +// Deploy runs the full deployment workflow +func (k *Deployer) Deploy(ctx context.Context) error { + k.log.Section("Deploying application") + + // Step 1: Apply common artifacts (namespace, service account, etc.) + if err := k.Operator.ApplyCommon(ctx); err != nil { + return err + } + k.log.Step("Access control and storage artifacts configured") + + // Step 2: Handle volumes (if any) + if err := k.Operator.Volumes(ctx); err != nil { + return err + } + + // Step 3: Deploy application + if err := k.Operator.Deploy(ctx); err != nil { + return err + } + k.log.Step("Application deployed") + + // Step 4: Configure routes (if any) + if err := k.Operator.Ingress(ctx); err != nil { + return err + } + + // Cleanup: Remove opposite resource type + k.Operator.Cleanup(ctx) + + k.log.Success("Deployment completed successfully!") + return nil +} diff --git a/plugins/helper/deployer/go.mod b/plugins/helper/deployer/go.mod new file mode 100644 index 0000000..8b354ed --- /dev/null +++ b/plugins/helper/deployer/go.mod @@ -0,0 +1,65 @@ +module github.com/cloudness-io/cloudness/plugins/helper/deployer + +go 1.25 + +require ( + k8s.io/api v0.34.1 + k8s.io/apimachinery v0.34.1 + k8s.io/client-go v0.34.1 +) + +require ( + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-openapi/jsonpointer v0.22.0 // indirect + github.com/go-openapi/jsonreference v0.21.1 // indirect + github.com/go-openapi/swag v0.24.1 // indirect + github.com/go-openapi/swag/cmdutils v0.24.0 // indirect + github.com/go-openapi/swag/conv v0.24.0 // indirect + github.com/go-openapi/swag/fileutils v0.24.0 // indirect + github.com/go-openapi/swag/jsonname v0.24.0 // indirect + github.com/go-openapi/swag/jsonutils v0.24.0 // indirect + 
github.com/go-openapi/swag/loading v0.24.0 // indirect + github.com/go-openapi/swag/mangling v0.24.0 // indirect + github.com/go-openapi/swag/netutils v0.24.0 // indirect + github.com/go-openapi/swag/stringutils v0.24.0 // indirect + github.com/go-openapi/swag/typeutils v0.24.0 // indirect + github.com/go-openapi/swag/yamlutils v0.24.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/pprof v0.0.0-20250903194437-c28834ac2320 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/onsi/ginkgo/v2 v2.22.0 // indirect + github.com/onsi/gomega v1.36.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/oauth2 v0.31.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/term v0.38.0 // indirect + golang.org/x/text v0.32.0 // indirect + golang.org/x/time v0.13.0 // indirect + google.golang.org/protobuf v1.36.9 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // 
indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) diff --git a/plugins/helper/deployer/go.sum b/plugins/helper/deployer/go.sum new file mode 100644 index 0000000..4ad6e71 --- /dev/null +++ b/plugins/helper/deployer/go.sum @@ -0,0 +1,166 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.22.0 h1:TmMhghgNef9YXxTu1tOopo+0BGEytxA+okbry0HjZsM= +github.com/go-openapi/jsonpointer v0.22.0/go.mod h1:xt3jV88UtExdIkkL7NloURjRQjbeUgcxFblMjq2iaiU= +github.com/go-openapi/jsonreference v0.21.1 h1:bSKrcl8819zKiOgxkbVNRUBIr6Wwj9KYrDbMjRs0cDA= +github.com/go-openapi/jsonreference v0.21.1/go.mod h1:PWs8rO4xxTUqKGu+lEvvCxD5k2X7QYkKAepJyCmSTT8= +github.com/go-openapi/swag v0.24.1 h1:DPdYTZKo6AQCRqzwr/kGkxJzHhpKxZ9i/oX0zag+MF8= +github.com/go-openapi/swag v0.24.1/go.mod h1:sm8I3lCPlspsBBwUm1t5oZeWZS0s7m/A+Psg0ooRU0A= +github.com/go-openapi/swag/cmdutils v0.24.0 h1:KlRCffHwXFI6E5MV9n8o8zBRElpY4uK4yWyAMWETo9I= +github.com/go-openapi/swag/cmdutils v0.24.0/go.mod 
h1:uxib2FAeQMByyHomTlsP8h1TtPd54Msu2ZDU/H5Vuf8= +github.com/go-openapi/swag/conv v0.24.0 h1:ejB9+7yogkWly6pnruRX45D1/6J+ZxRu92YFivx54ik= +github.com/go-openapi/swag/conv v0.24.0/go.mod h1:jbn140mZd7EW2g8a8Y5bwm8/Wy1slLySQQ0ND6DPc2c= +github.com/go-openapi/swag/fileutils v0.24.0 h1:U9pCpqp4RUytnD689Ek/N1d2N/a//XCeqoH508H5oak= +github.com/go-openapi/swag/fileutils v0.24.0/go.mod h1:3SCrCSBHyP1/N+3oErQ1gP+OX1GV2QYFSnrTbzwli90= +github.com/go-openapi/swag/jsonname v0.24.0 h1:2wKS9bgRV/xB8c62Qg16w4AUiIrqqiniJFtZGi3dg5k= +github.com/go-openapi/swag/jsonname v0.24.0/go.mod h1:GXqrPzGJe611P7LG4QB9JKPtUZ7flE4DOVechNaDd7Q= +github.com/go-openapi/swag/jsonutils v0.24.0 h1:F1vE1q4pg1xtO3HTyJYRmEuJ4jmIp2iZ30bzW5XgZts= +github.com/go-openapi/swag/jsonutils v0.24.0/go.mod h1:vBowZtF5Z4DDApIoxcIVfR8v0l9oq5PpYRUuteVu6f0= +github.com/go-openapi/swag/loading v0.24.0 h1:ln/fWTwJp2Zkj5DdaX4JPiddFC5CHQpvaBKycOlceYc= +github.com/go-openapi/swag/loading v0.24.0/go.mod h1:gShCN4woKZYIxPxbfbyHgjXAhO61m88tmjy0lp/LkJk= +github.com/go-openapi/swag/mangling v0.24.0 h1:PGOQpViCOUroIeak/Uj/sjGAq9LADS3mOyjznmHy2pk= +github.com/go-openapi/swag/mangling v0.24.0/go.mod h1:Jm5Go9LHkycsz0wfoaBDkdc4CkpuSnIEf62brzyCbhc= +github.com/go-openapi/swag/netutils v0.24.0 h1:Bz02HRjYv8046Ycg/w80q3g9QCWeIqTvlyOjQPDjD8w= +github.com/go-openapi/swag/netutils v0.24.0/go.mod h1:WRgiHcYTnx+IqfMCtu0hy9oOaPR0HnPbmArSRN1SkZM= +github.com/go-openapi/swag/stringutils v0.24.0 h1:i4Z/Jawf9EvXOLUbT97O0HbPUja18VdBxeadyAqS1FM= +github.com/go-openapi/swag/stringutils v0.24.0/go.mod h1:5nUXB4xA0kw2df5PRipZDslPJgJut+NjL7D25zPZ/4w= +github.com/go-openapi/swag/typeutils v0.24.0 h1:d3szEGzGDf4L2y1gYOSSLeK6h46F+zibnEas2Jm/wIw= +github.com/go-openapi/swag/typeutils v0.24.0/go.mod h1:q8C3Kmk/vh2VhpCLaoR2MVWOGP8y7Jc8l82qCTd1DYI= +github.com/go-openapi/swag/yamlutils v0.24.0 h1:bhw4894A7Iw6ne+639hsBNRHg9iZg/ISrOVr+sJGp4c= +github.com/go-openapi/swag/yamlutils v0.24.0/go.mod h1:DpKv5aYuaGm/sULePoeiG8uwMpZSfReo1HR3Ik0yaG8= 
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20250903194437-c28834ac2320 h1:c7ayAhbRP9HnEl/hg/WQOM9s0snWztfW6feWXZbGHw0= +github.com/google/pprof v0.0.0-20250903194437-c28834ac2320/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod 
h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo= +golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= 
+golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= +k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= +k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= +k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= +k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/plugins/helper/deployer/kubectl.go b/plugins/helper/deployer/kubectl.go new file mode 100644 index 
0000000..5982ad2 --- /dev/null +++ b/plugins/helper/deployer/kubectl.go @@ -0,0 +1,113 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/exec" + "strings" + "time" +) + +type Kubectl struct { + config *Config + log *Logger +} + +func NewKubectl(cfg *Config, log *Logger) *Kubectl { + return &Kubectl{ + config: cfg, + log: log, + } +} + +// ============================================================================= +// kubectl-based Apply +// ============================================================================= + +// ApplyYAMLFile applies a YAML file using kubectl +func (k *Kubectl) ApplyYAMLFile(ctx context.Context, filepath string) error { + // Check if file exists and has content + info, err := os.Stat(filepath) + if os.IsNotExist(err) || (err == nil && info.Size() == 0) { + k.log.Debug("Skipping empty or non-existent file: %s", filepath) + return nil + } + if err != nil { + return fmt.Errorf("failed to stat file %s: %w", filepath, err) + } + + // Use kubectl apply with retry + return k.kubectlApplyWithRetry(ctx, filepath, 3) +} + +// kubectlApplyWithRetry runs kubectl apply with exponential backoff +func (k *Kubectl) kubectlApplyWithRetry(ctx context.Context, filepath string, maxRetries int) error { + var lastErr error + backoff := 1 * time.Second + + for attempt := 0; attempt <= maxRetries; attempt++ { + if attempt > 0 { + k.log.Debug("Retry %d/%d after %v...", attempt, maxRetries, backoff) + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(backoff): + } + backoff *= 2 + } + + err := k.kubectlApply(ctx, filepath) + if err == nil { + return nil + } + + lastErr = err + + // Don't retry on certain errors + if strings.Contains(err.Error(), "invalid") || + strings.Contains(err.Error(), "forbidden") || + strings.Contains(err.Error(), "not found") { + return err + } + } + + return fmt.Errorf("kubectl apply failed after %d retries: %w", maxRetries, lastErr) +} + +// kubectlApply runs kubectl apply -f +func (k *Kubectl) 
kubectlApply(ctx context.Context, filepath string) error { + args := []string{"apply", "-f", filepath} + + if k.config.Verbose { + k.log.Debug("kubectl %s", strings.Join(args, " ")) + } + + cmd := exec.CommandContext(ctx, "kubectl", args...) + output, err := cmd.CombinedOutput() + + if k.config.Verbose && len(output) > 0 { + k.log.Info("%s", strings.TrimSpace(string(output))) + } + + if err != nil { + return fmt.Errorf("kubectl apply failed: %s: %w", strings.TrimSpace(string(output)), err) + } + + return nil +} + +// Delete runs kubectl delete +func (k *Kubectl) Delete(ctx context.Context, resource, name, namespace string) error { + args := []string{"delete", resource, name, "-n", namespace, "--ignore-not-found=true"} + + cmd := exec.CommandContext(ctx, "kubectl", args...) + output, err := cmd.CombinedOutput() + + if err != nil { + k.log.Debug("kubectl delete failed: %s", strings.TrimSpace(string(output))) + return err + } + + return nil +} diff --git a/plugins/helper/deployer/logger.go b/plugins/helper/deployer/logger.go new file mode 100644 index 0000000..621293b --- /dev/null +++ b/plugins/helper/deployer/logger.go @@ -0,0 +1,66 @@ +package main + +import ( + "fmt" + "os" +) + +// Logger provides structured logging with colors +type Logger struct { + verbose bool +} + +// ANSI color codes +const ( + colorRed = "\033[1;31m" + colorGreen = "\033[1;32m" + colorYellow = "\033[1;33m" + colorBlue = "\033[38;2;40;153;245m" + colorReset = "\033[0m" +) + +// NewLogger creates a new logger +func NewLogger(verbose bool) *Logger { + return &Logger{verbose: verbose} +} + +// Error prints an error message +func (l *Logger) Error(format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, colorRed+"❌ "+format+colorReset+"\n", args...) +} + +// Warn prints a warning message +func (l *Logger) Warn(format string, args ...interface{}) { + fmt.Printf(colorYellow+"⚠️ "+format+colorReset+"\n", args...) 
+} + +// Info prints an info message +func (l *Logger) Info(format string, args ...interface{}) { + fmt.Printf(format+"\n", args...) +} + +// Success prints a success message +func (l *Logger) Success(format string, args ...interface{}) { + fmt.Printf(colorGreen+"✔ "+format+colorReset+"\n", args...) +} + +// Step prints a step completion message +func (l *Logger) Step(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + fmt.Printf("%s "+colorGreen+"✔"+colorReset+"\n", msg) +} + +// Debug prints a debug message (only if verbose) +func (l *Logger) Debug(format string, args ...interface{}) { + if l.verbose { + fmt.Printf(colorBlue+"[DEBUG] "+format+colorReset+"\n", args...) + } +} + +// Section prints a section header +func (l *Logger) Section(title string) { + fmt.Println() + fmt.Println(colorBlue + "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + colorReset) + fmt.Printf(colorBlue+" %s"+colorReset+"\n", title) + fmt.Println(colorBlue + "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + colorReset) +} diff --git a/plugins/helper/deployer/main.go b/plugins/helper/deployer/main.go new file mode 100644 index 0000000..6cda0dc --- /dev/null +++ b/plugins/helper/deployer/main.go @@ -0,0 +1,45 @@ +package main + +import ( + "context" + "os" + "os/signal" + "syscall" +) + +func main() { + log := NewLogger(os.Getenv("VERBOSE") == "true") + + // Load configuration + cfg, err := LoadConfigFromEnv() + if err != nil { + log.Error("Configuration error: %v", err) + os.Exit(1) + } + + // Create deployer + deployer, err := NewDeployer(cfg) + if err != nil { + log.Error("Failed to initialize deployer: %v", err) + os.Exit(1) + } + + // Setup context with cancellation + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Handle signals + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigCh + log.Warn("Received shutdown 
signal") + cancel() + }() + + // Run deployment + if err := deployer.Deploy(ctx); err != nil { + log.Error("Deployment failed: %v", err) + os.Exit(1) + } +} diff --git a/plugins/helper/deployer/operator.go b/plugins/helper/deployer/operator.go new file mode 100644 index 0000000..0c58798 --- /dev/null +++ b/plugins/helper/deployer/operator.go @@ -0,0 +1,20 @@ +package main + +import "context" + +type Operator interface { + // ApplyCommon applies common artifact files + ApplyCommon(ctx context.Context) error + + // Volumes handles PVC creation and resizing + Volumes(ctx context.Context) error + + // Deploy deploys the actual application + Deploy(ctx context.Context) error + + // Ingress deploys the ingress + Ingress(ctx context.Context) error + + // Cleanup cleans up resources + Cleanup(ctx context.Context) +} diff --git a/plugins/helper/deployer/operator_base.go b/plugins/helper/deployer/operator_base.go new file mode 100644 index 0000000..b5e92d6 --- /dev/null +++ b/plugins/helper/deployer/operator_base.go @@ -0,0 +1,69 @@ +package main + +import ( + "context" + "fmt" + + appsv1 "k8s.io/api/apps/v1" +) + +type BaseOpeator struct { + config *Config + kubectl *Kubectl + log *Logger +} + +func NewBaseOperator(config *Config, kubectl *Kubectl, log *Logger) *BaseOpeator { + return &BaseOpeator{ + config: config, + kubectl: kubectl, + log: log, + } +} + +func (b *BaseOpeator) ApplyCommon(ctx context.Context) error { + if err := b.kubectl.ApplyYAMLFile(ctx, b.config.CommonYAMLPath); err != nil { + return fmt.Errorf("failed to apply common artifacts: %w", err) + } + return nil +} + +func (b *BaseOpeator) ApplyIngress(ctx context.Context) error { + if b.config.HasRoute { + if err := b.kubectl.ApplyYAMLFile(ctx, b.config.RouteYAMLPath); err != nil { + return fmt.Errorf("failed to deploy routes: %w", err) + } + b.log.Step("HTTP routes configured") + } + return nil +} + +// utils +func isDeploymentReady(deploy *appsv1.Deployment) bool { + if deploy.Generation != 
deploy.Status.ObservedGeneration { + return false + } + + if deploy.Spec.Replicas == nil { + return false + } + + replicas := *deploy.Spec.Replicas + return deploy.Status.UpdatedReplicas == replicas && + deploy.Status.ReadyReplicas == replicas && + deploy.Status.AvailableReplicas == replicas +} + +func isStatefulSetReady(sts *appsv1.StatefulSet) bool { + if sts.Generation != sts.Status.ObservedGeneration { + return false + } + + if sts.Spec.Replicas == nil { + return false + } + + replicas := *sts.Spec.Replicas + return sts.Status.UpdatedReplicas == replicas && + sts.Status.ReadyReplicas == replicas +} diff --git a/plugins/helper/deployer/operator_statefull.go b/plugins/helper/deployer/operator_statefull.go new file mode 100644 index 0000000..3febf34 --- /dev/null +++ b/plugins/helper/deployer/operator_statefull.go @@ -0,0 +1,275 @@ +package main + +import ( + "context" + "fmt" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" +) + +type StatefulOperator struct { + config *Config + base *BaseOpeator + clientset kubernetes.Interface + kubectl *Kubectl + log *Logger +} + +func NewStatefulOperator(cfg *Config, clientset kubernetes.Interface, log *Logger) *StatefulOperator { + kubectl := NewKubectl(cfg, log) + baseOperator := NewBaseOperator(cfg, kubectl, log) + return &StatefulOperator{ + config: cfg, + base: baseOperator, + clientset: clientset, + kubectl: kubectl, + log: log, + } +} + +func (s *StatefulOperator) ApplyCommon(ctx context.Context) error { + return s.base.ApplyCommon(ctx) +} + +func (s *StatefulOperator) Volumes(ctx context.Context) error { + if s.config.HasVolume { + if err := s.deployVolumes(ctx); err != nil { + return fmt.Errorf("failed to deploy volumes: %w", err) + } + s.log.Step("Volumes provisioned") + } + return nil +} + +func (k *StatefulOperator) Deploy(ctx 
context.Context) error { + if err := k.kubectl.ApplyYAMLFile(ctx, k.config.AppYAMLPath); err != nil { + return err + } + + return k.waitForRollout(ctx) +} + +func (s *StatefulOperator) Ingress(ctx context.Context) error { return s.base.ApplyIngress(ctx) } + +func (k *StatefulOperator) Cleanup(ctx context.Context) { + k.log.Debug("Running cleanup...") + + err := k.kubectl.Delete(ctx, "deployment", k.config.AppIdentifier, k.config.AppNamespace) + if err != nil && !errors.IsNotFound(err) { + k.log.Debug("Cleanup: %v", err) + } +} + +// waitForRollout waits for the statefulset to roll out using watch +func (k *StatefulOperator) waitForRollout(ctx context.Context) error { + timeout := time.Duration(k.config.RolloutTimeout()) * time.Second + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + return k.watchStatefulSetRollout(ctx) +} + +// watchStatefulSetRollout watches a StatefulSet until it's ready +func (k *StatefulOperator) watchStatefulSetRollout(ctx context.Context) error { + // Get initial state + sts, err := k.clientset.AppsV1().StatefulSets(k.config.AppNamespace).Get(ctx, k.config.AppIdentifier, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get statefulset: %w", err) + } + + if isStatefulSetReady(sts) { + return nil + } + + // Watch for changes + watcher, err := k.clientset.AppsV1().StatefulSets(k.config.AppNamespace).Watch(ctx, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.name=%s", k.config.AppIdentifier), + }) + if err != nil { + return fmt.Errorf("failed to watch statefulset: %w", err) + } + defer watcher.Stop() + + for { + select { + case <-ctx.Done(): + k.log.Error("Rollout timed out") + return fmt.Errorf("statefulset rollout timed out") + case event, ok := <-watcher.ResultChan(): + if !ok { + return fmt.Errorf("watch channel closed") + } + + if event.Type == watch.Error { + continue + } + + sts, ok := event.Object.(*appsv1.StatefulSet) + if !ok { + continue + } + + k.log.Debug("StatefulSet %s: %d/%d 
ready", + sts.Name, + sts.Status.ReadyReplicas, + *sts.Spec.Replicas) + + if isStatefulSetReady(sts) { + return nil + } + } + } +} + +// ============================================================================= +// Volume Handling (client-go for PVC status checking) +// ============================================================================= + +// deployVolumes handles PVC creation and resizing +func (k *StatefulOperator) deployVolumes(ctx context.Context) error { + // Handle remount for volume resize + if k.config.NeedRemount { + k.log.Info("Volume resize detected, removing statefulset for remount...") + _ = k.kubectl.Delete(ctx, "statefulset", k.config.AppIdentifier, k.config.AppNamespace) + } + + // Apply volume configuration + if err := k.kubectl.ApplyYAMLFile(ctx, k.config.VolumeYAMLPath); err != nil { + return err + } + + // Wait for PVCs using watch (efficient) + return k.waitForPVCs(ctx) +} + +// waitForPVCs waits for all PVCs to be bound or resized +func (k *StatefulOperator) waitForPVCs(ctx context.Context) error { + // List PVCs with our label + pvcs, err := k.clientset.CoreV1().PersistentVolumeClaims(k.config.AppNamespace).List(ctx, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app.kubernetes.io/instance=%s", k.config.AppIdentifier), + }) + if err != nil { + return fmt.Errorf("failed to list PVCs: %w", err) + } + + if len(pvcs.Items) == 0 { + return nil + } + + timeout := time.Duration(k.config.PVCResizeTimeout) * time.Second + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + for _, pvc := range pvcs.Items { + if err := k.waitForPVC(ctx, pvc.Name); err != nil { + return err + } + } + + return nil +} + +// waitForPVC waits for a single PVC to be ready using watch +func (k *StatefulOperator) waitForPVC(ctx context.Context, name string) error { + // First check current status + pvc, err := k.clientset.CoreV1().PersistentVolumeClaims(k.config.AppNamespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return 
fmt.Errorf("failed to get PVC %s: %w", name, err) + } + + if k.isPVCReady(pvc) { + return nil + } + + // Watch for changes + watcher, err := k.clientset.CoreV1().PersistentVolumeClaims(k.config.AppNamespace).Watch(ctx, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.name=%s", name), + }) + if err != nil { + return fmt.Errorf("failed to watch PVC %s: %w", name, err) + } + defer watcher.Stop() + + k.log.Info("Waiting for volume %s...", name) + + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timed out waiting for PVC %s", name) + case event, ok := <-watcher.ResultChan(): + if !ok { + return fmt.Errorf("watch channel closed for PVC %s", name) + } + + if event.Type == watch.Error { + continue + } + + pvc, ok := event.Object.(*corev1.PersistentVolumeClaim) + if !ok { + continue + } + + k.log.Debug("PVC %s status: %s", name, pvc.Status.Phase) + + if k.isPVCReady(pvc) { + return nil + } + } + } +} + +// isPVCReady checks if a PVC is ready for use +func (k *StatefulOperator) isPVCReady(pvc *corev1.PersistentVolumeClaim) bool { + // Bound is ready + if pvc.Status.Phase == corev1.ClaimBound { + return true + } + + // For Pending PVCs, check if they're waiting for first consumer + if pvc.Status.Phase == corev1.ClaimPending { + // Check events to see if it's WaitForFirstConsumer + if k.hasWaitForFirstConsumerEvent(pvc.Name) { + return true + } + } + + // FileSystemResizePending - volume resized, waiting for pod remount + for _, cond := range pvc.Status.Conditions { + if cond.Type == corev1.PersistentVolumeClaimFileSystemResizePending && cond.Status == corev1.ConditionTrue { + k.log.Info("Volume %s resized. 
Remounting application to finalize.", pvc.Name) + return true + } + } + + return false +} + +// hasWaitForFirstConsumerEvent checks if the PVC has a WaitForFirstConsumer event +func (k *StatefulOperator) hasWaitForFirstConsumerEvent(pvcName string) bool { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + events, err := k.clientset.CoreV1().Events(k.config.AppNamespace).List(ctx, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("involvedObject.kind=PersistentVolumeClaim,involvedObject.name=%s", pvcName), + }) + if err != nil { + k.log.Debug("Failed to get events for PVC %s: %v", pvcName, err) + return false + } + + // Check if any event has WaitForFirstConsumer reason + for _, event := range events.Items { + if event.Reason == "WaitForFirstConsumer" { + return true + } + } + + return false +} diff --git a/plugins/helper/deployer/operator_stateless.go b/plugins/helper/deployer/operator_stateless.go new file mode 100644 index 0000000..c180e51 --- /dev/null +++ b/plugins/helper/deployer/operator_stateless.go @@ -0,0 +1,146 @@ +package main + +import ( + "context" + "fmt" + "os/exec" + "strings" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" +) + +type StatelessOperator struct { + config *Config + base *BaseOpeator + clientset kubernetes.Interface + kubectl *Kubectl + log *Logger +} + +func NewStatelessOperator(cfg *Config, clientset kubernetes.Interface, log *Logger) *StatelessOperator { + kubectl := NewKubectl(cfg, log) + baseOperator := NewBaseOperator(cfg, kubectl, log) + return &StatelessOperator{ + config: cfg, + base: baseOperator, + clientset: clientset, + kubectl: kubectl, + log: log, + } +} + +func (s *StatelessOperator) ApplyCommon(ctx context.Context) error { + return s.base.ApplyCommon(ctx) +} + +func (s *StatelessOperator) Volumes(ctx 
context.Context) error { + return nil +} + +func (s *StatelessOperator) Deploy(ctx context.Context) error { + if err := s.kubectl.ApplyYAMLFile(ctx, s.config.AppYAMLPath); err != nil { + return err + } + + return s.waitForRollout(ctx) +} + +func (s *StatelessOperator) Ingress(ctx context.Context) error { return s.base.ApplyIngress(ctx) } + +func (k *StatelessOperator) Cleanup(ctx context.Context) { + k.log.Debug("Running cleanup...") + + err := k.kubectl.Delete(ctx, "statefulset", k.config.AppIdentifier, k.config.AppNamespace) + if err != nil && !errors.IsNotFound(err) { + k.log.Debug("Cleanup: %v", err) + } +} + +// waitForRollout waits for the deployment to roll out using watch +func (s *StatelessOperator) waitForRollout(ctx context.Context) error { + timeout := time.Duration(s.config.RolloutTimeout()) * time.Second + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + return s.watchDeploymentRollout(ctx) +} + +// watchDeploymentRollout watches a Deployment until it's ready +func (s *StatelessOperator) watchDeploymentRollout(ctx context.Context) error { + // Get initial state + deploy, err := s.clientset.AppsV1().Deployments(s.config.AppNamespace).Get(ctx, s.config.AppIdentifier, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get deployment: %w", err) + } + + if isDeploymentReady(deploy) { + return nil + } + + // Watch for changes + watcher, err := s.clientset.AppsV1().Deployments(s.config.AppNamespace).Watch(ctx, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.name=%s", s.config.AppIdentifier), + }) + if err != nil { + return fmt.Errorf("failed to watch deployment: %w", err) + } + defer watcher.Stop() + + for { + select { + case <-ctx.Done(): + s.log.Error("Rollout timed out, reverting...") + s.rollbackDeployment(context.Background()) + return fmt.Errorf("deployment rollout timed out") + case event, ok := <-watcher.ResultChan(): + if !ok { + return fmt.Errorf("watch channel closed") + } + + if event.Type == 
watch.Error { + continue + } + + deploy, ok := event.Object.(*appsv1.Deployment) + if !ok { + continue + } + + s.log.Debug("Deployment %s: %d/%d ready", + deploy.Name, + deploy.Status.ReadyReplicas, + *deploy.Spec.Replicas) + + if isDeploymentReady(deploy) { + return nil + } + + // Check for failure conditions + for _, cond := range deploy.Status.Conditions { + if cond.Type == appsv1.DeploymentProgressing && cond.Status == corev1.ConditionFalse { + s.log.Error("Deployment failed: %s", cond.Message) + s.rollbackDeployment(context.Background()) + return fmt.Errorf("deployment failed: %s", cond.Message) + } + } + } + } +} + +// rollbackDeployment rolls back a failed deployment using kubectl +func (k *StatelessOperator) rollbackDeployment(ctx context.Context) { + args := []string{"rollout", "undo", "deployment", k.config.AppIdentifier, "-n", k.config.AppNamespace} + cmd := exec.CommandContext(ctx, "kubectl", args...) + output, err := cmd.CombinedOutput() + if err != nil { + k.log.Error("Failed to rollback: %s", strings.TrimSpace(string(output))) + } else { + k.log.Info("Rolled back deployment") + } +} diff --git a/plugins/helper/readme.md b/plugins/helper/readme.md new file mode 100644 index 0000000..95d30ff --- /dev/null +++ b/plugins/helper/readme.md @@ -0,0 +1,3 @@ +to build helper + +sudo docker build . 
-t cloudnessio/helper:51 --build-arg TARGETPLATFORM="'linux/amd64'" diff --git a/plugins/helper/scripts/build-script.sh b/plugins/helper/scripts/build-script.sh new file mode 100644 index 0000000..56d3106 --- /dev/null +++ b/plugins/helper/scripts/build-script.sh @@ -0,0 +1,231 @@ +#!/bin/sh + +# Cloudness Build Script +# This script handles container image building using Dockerfile or Nixpacks + +set -eu + +# ============================================================================== +# Configuration +# ============================================================================== + +# Build type: "dockerfile" or "nixpacks" +: "${CLOUDNESS_BUILD_TYPE:=}" + +# Source paths +: "${CLOUDNESS_BUILD_SOURCE_PATH:=}" +: "${CLOUDNESS_BUILD_DOCKERFILE:=Dockerfile}" + +# Image configuration +: "${CLOUDNESS_BUILD_IMAGE:=}" +: "${CLOUDNESS_BUILD_CACHE_IMAGE:=}" + +# Registry configuration +: "${CLOUDNESS_IMAGE_REGISTRY:=}" +: "${CLOUDNESS_IMAGE_MIRROR_REGISTRY:=}" +: "${CLOUDNESS_MIRROR_ENABLED:=false}" + +# Nixpacks specific +: "${CLOUDNESS_BUILD_CMD:=}" +: "${CLOUDNESS_START_CMD:=}" + +# Build args (JSON or space-separated key=value pairs) +: "${CLOUDNESS_BUILD_ARGS:=}" + +# ============================================================================== +# Validation +# ============================================================================== + +validate_inputs() { + has_errors=0 + + if [ -z "$CLOUDNESS_BUILD_TYPE" ]; then + log_error "CLOUDNESS_BUILD_TYPE is required (dockerfile or nixpacks)" + has_errors=1 + elif [ "$CLOUDNESS_BUILD_TYPE" != "dockerfile" ] && [ "$CLOUDNESS_BUILD_TYPE" != "nixpacks" ]; then + log_error "CLOUDNESS_BUILD_TYPE must be 'dockerfile' or 'nixpacks'" + has_errors=1 + fi + + if [ -z "$CLOUDNESS_BUILD_SOURCE_PATH" ]; then + log_error "CLOUDNESS_BUILD_SOURCE_PATH is required" + has_errors=1 + fi + + if [ -z "$CLOUDNESS_BUILD_IMAGE" ]; then + log_error "CLOUDNESS_BUILD_IMAGE is required" + has_errors=1 + fi + + if [ "$has_errors" -eq 1 ]; then + 
return 1 + fi + + return 0 +} + +# ============================================================================== +# BuildKit Configuration +# ============================================================================== + +setup_buildkit_config() { + + printf "\n" + BUILDKITD_CONFIG_PATH="$HOME/.config/buildkit/buildkitd.toml" + mkdir -p "$(dirname "$BUILDKITD_CONFIG_PATH")" + > "$BUILDKITD_CONFIG_PATH" + + # Main registry configuration + MAIN_REGISTRY=$(echo "$CLOUDNESS_IMAGE_REGISTRY" | cut -d'/' -f1) + + cat >> "$BUILDKITD_CONFIG_PATH" << EOF +[registry."$MAIN_REGISTRY"] + http = true + insecure = true +EOF + + # Mirror registry configuration + if [ "$CLOUDNESS_MIRROR_ENABLED" = "true" ] && [ -n "$CLOUDNESS_IMAGE_MIRROR_REGISTRY" ]; then + MIRROR_REGISTRY=$(echo "$CLOUDNESS_IMAGE_MIRROR_REGISTRY" | cut -d'/' -f1) + + cat >> "$BUILDKITD_CONFIG_PATH" << EOF + +[registry."$MIRROR_REGISTRY"] + http = true + insecure = true + +[registry."docker.io"] + mirrors = ["$CLOUDNESS_IMAGE_MIRROR_REGISTRY"] +EOF + fi +} + +# ============================================================================== +# Build Functions +# ============================================================================== + +build_with_dockerfile() { + print_section "Building with Dockerfile" + + log_info "Dockerfile: $CLOUDNESS_BUILD_DOCKERFILE" + + # Build base command + build_cmd="buildctl-daemonless.sh build \ + --frontend=dockerfile.v0 \ + --local context=$CLOUDNESS_BUILD_SOURCE_PATH \ + --local dockerfile=$CLOUDNESS_BUILD_SOURCE_PATH \ + --opt filename=$CLOUDNESS_BUILD_DOCKERFILE \ + --output type=image,name=$CLOUDNESS_BUILD_IMAGE,push=true" + + # Add cache configuration + if [ -n "$CLOUDNESS_BUILD_CACHE_IMAGE" ]; then + build_cmd="$build_cmd \ + --export-cache type=registry,ref=$CLOUDNESS_BUILD_CACHE_IMAGE,mode=max \ + --import-cache type=registry,ref=$CLOUDNESS_BUILD_CACHE_IMAGE,mode=max" + fi + + # Add build args + if [ -n "$CLOUDNESS_BUILD_ARGS" ]; then + for arg in 
$CLOUDNESS_BUILD_ARGS; do + build_cmd="$build_cmd --opt build-arg:$arg" + done + fi + + # Execute build + log_info "Starting build..." + if ! eval "$build_cmd"; then + log_error "Dockerfile build failed" + return 1 + fi + + return 0 +} + +build_with_nixpacks() { + print_section "Building with Nixpacks" + + # Build nixpacks command + nixpacks_cmd="nixpacks build $CLOUDNESS_BUILD_SOURCE_PATH -o $CLOUDNESS_BUILD_SOURCE_PATH" + nixpacks_cmd="$nixpacks_cmd --name $CLOUDNESS_BUILD_IMAGE" + + if [ -n "$CLOUDNESS_BUILD_CMD" ]; then + log_info "Build command: $CLOUDNESS_BUILD_CMD" + nixpacks_cmd="$nixpacks_cmd --build-cmd \"$CLOUDNESS_BUILD_CMD\"" + fi + + if [ -n "$CLOUDNESS_START_CMD" ]; then + log_info "Start command: $CLOUDNESS_START_CMD" + nixpacks_cmd="$nixpacks_cmd --start-cmd \"$CLOUDNESS_START_CMD\"" + fi + + # Add environment variables from build args + if [ -n "$CLOUDNESS_BUILD_ARGS" ]; then + for arg in $CLOUDNESS_BUILD_ARGS; do + key=$(echo "$arg" | cut -d'=' -f1) + value=$(echo "$arg" | cut -d'=' -f2-) + nixpacks_cmd="$nixpacks_cmd --env $key=\"$value\"" + done + fi + + nixpacks_cmd="$nixpacks_cmd" + + # Generate Dockerfile with Nixpacks + log_info "Generating Dockerfile with Nixpacks..." + if ! eval "$nixpacks_cmd"; then + log_error "Nixpacks generation failed" + return 1 + fi + + # Build and push with BuildKit + build_cmd="buildctl-daemonless.sh build \ + --frontend=dockerfile.v0 \ + --local context=$CLOUDNESS_BUILD_SOURCE_PATH \ + --local dockerfile=$CLOUDNESS_BUILD_SOURCE_PATH \ + --opt filename=/.nixpacks/Dockerfile \ + --output type=image,name=$CLOUDNESS_BUILD_IMAGE,push=true" + + # Add cache configuration + if [ -n "$CLOUDNESS_BUILD_CACHE_IMAGE" ]; then + build_cmd="$build_cmd \ + --export-cache type=registry,ref=$CLOUDNESS_BUILD_CACHE_IMAGE \ + --import-cache type=registry,ref=$CLOUDNESS_BUILD_CACHE_IMAGE,mode=max" + fi + + log_info "Building and pushing image..." + if ! 
eval "$build_cmd"; then + log_error "Image build/push failed" + return 1 + fi + + log_step "Image built and pushed successfully" + return 0 +} + +# ============================================================================== +# Main +# ============================================================================== + +main() { + if ! validate_inputs; then + exit 1 + fi + + setup_buildkit_config + + case "$CLOUDNESS_BUILD_TYPE" in + dockerfile) + if ! build_with_dockerfile; then + exit 1 + fi + ;; + nixpacks) + if ! build_with_nixpacks; then + exit 1 + fi + ;; + esac + + log_success "Build completed successfully!" +} + +main "$@" diff --git a/plugins/helper/scripts/cloudness-utils.sh b/plugins/helper/scripts/cloudness-utils.sh new file mode 100644 index 0000000..071ee46 --- /dev/null +++ b/plugins/helper/scripts/cloudness-utils.sh @@ -0,0 +1,65 @@ +#!/bin/sh +# Cloudness Shared Utilities +# Source this file in scripts: . /usr/local/lib/cloudness-utils.sh + +# ============================================================================== +# Colors +# ============================================================================== +readonly CLOUDNESS_RED='\033[1;31m' +readonly CLOUDNESS_GREEN='\033[1;32m' +readonly CLOUDNESS_YELLOW='\033[1;33m' +readonly CLOUDNESS_BLUE='\033[38;2;40;153;245m' +readonly CLOUDNESS_NC='\033[0m' + +# ============================================================================== +# Logging Functions +# ============================================================================== + +log_error() { + printf "%b\n" "${CLOUDNESS_RED}❌ $*${CLOUDNESS_NC}" >&2 +} + +log_warn() { + printf "%b\n" "${CLOUDNESS_YELLOW}⚠️ $*${CLOUDNESS_NC}" +} + +log_info() { + printf "%b\n" "$*" +} + +log_success() { + printf "%b\n" "${CLOUDNESS_GREEN}✔ $*${CLOUDNESS_NC}" +} + +log_step() { + printf "%b\n" "$* ${CLOUDNESS_GREEN}✔${CLOUDNESS_NC}" +} + +log_debug() { + if [ "${VERBOSE:-false}" = "true" ]; then + printf "${CLOUDNESS_BLUE}[DEBUG]${CLOUDNESS_NC} %s\n" "$*" 
+ fi +} + +print_section() { + printf "${CLOUDNESS_BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CLOUDNESS_NC}\n" + printf "${CLOUDNESS_BLUE} %s${CLOUDNESS_NC}\n" "$1" + printf "${CLOUDNESS_BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CLOUDNESS_NC}\n" +} + +# ============================================================================== +# Helper Functions +# ============================================================================== + +command_exists() { + command -v "$1" >/dev/null 2>&1 +} + +# Run command with optional verbose output +run_command() { + if [ "${VERBOSE:-false}" = "true" ]; then + "$@" + else + "$@" > /dev/null 2>&1 + fi +} diff --git a/plugins/helper/scripts/deploy-script.sh b/plugins/helper/scripts/deploy-script.sh new file mode 100644 index 0000000..98ebd74 --- /dev/null +++ b/plugins/helper/scripts/deploy-script.sh @@ -0,0 +1,432 @@ +#!/bin/sh + +# Cloudness Kubernetes Deployment Script +# This script deploys applications to Kubernetes with proper resource management + +set -eu + +# ============================================================================== +# Configuration & Constants +# ============================================================================== + +# Timeout configurations (can be overridden via environment) +readonly ROLLOUT_TIMEOUT_STATELESS="${ROLLOUT_TIMEOUT_STATELESS:-60s}" +readonly ROLLOUT_TIMEOUT_STATEFUL="${ROLLOUT_TIMEOUT_STATEFUL:-120s}" +readonly PVC_RESIZE_TIMEOUT="${PVC_RESIZE_TIMEOUT:-300}" +readonly PVC_RESIZE_POLL_INTERVAL="${PVC_RESIZE_POLL_INTERVAL:-5}" + +# Required environment variables with defaults +: "${CLOUDNESS_DEPLOY_APP_IDENTIFIER:=}" +: "${CLOUDNESS_DEPLOY_APP_NAMESPACE:=}" +: "${CLOUDNESS_DEPLOY_FLAG_APP_TYPE:=}" +: "${CLOUDNESS_DEPLOY_FLAG_HAS_VOLUME:=0}" +: "${CLOUDNESS_DEPLOY_FLAG_NEED_REMOUNT:=0}" +: "${CLOUDNESS_DEPLOY_FLAG_HAS_ROUTE:=0}" +: "${CLOUDNESS_DEPLOY_PATH:=}" +: "${VERBOSE:=false}" + +# YAML file paths 
(set from CLOUDNESS_DEPLOY_PATH) +CLOUDNESS_DEPLOY_YAML_COMMON="" +CLOUDNESS_DEPLOY_YAML_VOLUME="" +CLOUDNESS_DEPLOY_YAML_APP="" +CLOUDNESS_DEPLOY_YAML_ROUTE="" + +# Track cleanup state +CLEANUP_DONE=false + +# ============================================================================== +# Validation Functions +# ============================================================================== + +validate_dependencies() { + missing="" + + if ! command_exists kubectl; then + missing="$missing kubectl" + fi + + if ! command_exists yq; then + missing="$missing yq" + fi + + if [ -n "$missing" ]; then + log_error "Missing required dependencies:$missing" + log_error "Please install them before running this script." + return 1 + fi + + return 0 +} + +validate_environment() { + has_errors=0 + + if [ -z "$CLOUDNESS_DEPLOY_APP_IDENTIFIER" ]; then + log_error " - CLOUDNESS_DEPLOY_APP_IDENTIFIER is required" + has_errors=1 + fi + + if [ -z "$CLOUDNESS_DEPLOY_APP_NAMESPACE" ]; then + log_error " - CLOUDNESS_DEPLOY_APP_NAMESPACE is required" + has_errors=1 + fi + + if [ -z "$CLOUDNESS_DEPLOY_FLAG_APP_TYPE" ]; then + log_error " - CLOUDNESS_DEPLOY_FLAG_APP_TYPE is required" + has_errors=1 + elif [ "$CLOUDNESS_DEPLOY_FLAG_APP_TYPE" != "Stateless" ] && [ "$CLOUDNESS_DEPLOY_FLAG_APP_TYPE" != "Stateful" ]; then + log_error " - CLOUDNESS_DEPLOY_FLAG_APP_TYPE must be 'Stateless' or 'Stateful'" + has_errors=1 + fi + + if [ -z "$CLOUDNESS_DEPLOY_PATH" ]; then + log_error " - CLOUDNESS_DEPLOY_PATH is required" + has_errors=1 + fi + + if [ "$has_errors" -eq 1 ]; then + log_error "Environment validation failed" + return 1 + fi + + # Set YAML file paths from deploy path (mounted from ConfigMap) + CLOUDNESS_DEPLOY_YAML_COMMON="$CLOUDNESS_DEPLOY_PATH/common.yaml" + CLOUDNESS_DEPLOY_YAML_VOLUME="$CLOUDNESS_DEPLOY_PATH/volume.yaml" + CLOUDNESS_DEPLOY_YAML_APP="$CLOUDNESS_DEPLOY_PATH/app.yaml" + CLOUDNESS_DEPLOY_YAML_ROUTE="$CLOUDNESS_DEPLOY_PATH/route.yaml" + + return 0 +} + +# 
============================================================================== +# Kubernetes Operations +# ============================================================================== + +# Returns the resource type based on app type +get_resource_type() { + if [ "$CLOUDNESS_DEPLOY_FLAG_APP_TYPE" = "Stateless" ]; then + echo "deployment" + else + echo "statefulset" + fi +} + +# Returns the opposite resource type (for cleanup) +get_opposite_resource_type() { + if [ "$CLOUDNESS_DEPLOY_FLAG_APP_TYPE" = "Stateless" ]; then + echo "statefulset" + else + echo "deployment" + fi +} + +# Apply Kubernetes configuration from YAML file +kube_apply() { + yaml_file="$1" + error_output="" + + # Skip if file doesn't exist or is empty + if [ ! -f "$yaml_file" ] || [ ! -s "$yaml_file" ]; then + return 0 + fi + + # Apply the YAML + if [ "$VERBOSE" = "true" ]; then + if ! kubectl apply -f "$yaml_file"; then + log_error "Failed to apply Kubernetes configuration from $yaml_file" + return 1 + fi + else + if ! error_output=$(kubectl apply -f "$yaml_file" 2>&1 >/dev/null); then + log_error "Failed to apply Kubernetes configuration:" + log_error "$error_output" + return 1 + fi + fi + + return 0 +} + +# Delete a Kubernetes resource +kube_delete() { + resource_type="$1" + resource_name="$2" + namespace="$3" + error_output="" + + if [ "$VERBOSE" = "true" ]; then + if ! kubectl delete "$resource_type/$resource_name" -n "$namespace" --ignore-not-found=true; then + log_warn "Failed to delete $resource_type/$resource_name" + return 1 + fi + else + if ! 
error_output=$(kubectl delete "$resource_type/$resource_name" -n "$namespace" --ignore-not-found=true 2>&1 >/dev/null); then + log_warn "Failed to delete $resource_type/$resource_name: $error_output" + return 1 + fi + fi + + return 0 +} + +# Wait for rollout to complete +kube_rollout_status() { + resource_type="" + timeout="" + error_output="" + + resource_type=$(get_resource_type) + + if [ "$resource_type" = "deployment" ]; then + timeout="$ROLLOUT_TIMEOUT_STATELESS" + else + timeout="$ROLLOUT_TIMEOUT_STATEFUL" + fi + + if [ "$VERBOSE" = "true" ]; then + if ! kubectl rollout status "$resource_type/$CLOUDNESS_DEPLOY_APP_IDENTIFIER" \ + -n "$CLOUDNESS_DEPLOY_APP_NAMESPACE" \ + --timeout="$timeout"; then + log_error "Rollout failed, reverting..." + kubectl rollout undo "$resource_type/$CLOUDNESS_DEPLOY_APP_IDENTIFIER" \ + -n "$CLOUDNESS_DEPLOY_APP_NAMESPACE" || true + return 1 + fi + else + if ! error_output=$(kubectl rollout status "$resource_type/$CLOUDNESS_DEPLOY_APP_IDENTIFIER" \ + -n "$CLOUDNESS_DEPLOY_APP_NAMESPACE" \ + --timeout="$timeout" 2>&1); then + log_error "$error_output" + log_error "Rollout failed, reverting..." 
+        kubectl rollout undo "$resource_type/$CLOUDNESS_DEPLOY_APP_IDENTIFIER" \
+            -n "$CLOUDNESS_DEPLOY_APP_NAMESPACE" 2>/dev/null || true
+        return 1
+    fi
+
+    return 0
+}
+
+# Strip a literal "Gi" suffix to get a numeric GiB value. NOTE(review): assumes
+# sizes are always "<n>Gi" — other units (e.g. "500Mi", "1G") would pass through
+# unchanged and break the numeric -ge comparison below; confirm inputs.
+parse_size_to_gib() {
+    size_str="$1"
+    echo "$size_str" | sed 's/Gi//'
+}
+
+# Wait for PVC to resize
+kube_wait_pvc_resize() {
+    pvc_name="$1"
+    new_size="$2"
+    namespace="$CLOUDNESS_DEPLOY_APP_NAMESPACE"
+    deadline=""
+    current_time=""
+
+    current_time=$(date +%s)
+    deadline=$((current_time + PVC_RESIZE_TIMEOUT))
+
+    while true; do
+        # Check PVC status
+        pvc_status=""
+        pvc_status=$(kubectl get pvc "$pvc_name" -n "$namespace" -o jsonpath='{.status.phase}' 2>/dev/null || echo "")
+
+        if [ "$VERBOSE" = "true" ]; then
+            log_info "PVC '$pvc_name' status: $pvc_status"
+        fi
+
+        # Handle WaitForFirstConsumer
+        if [ "$pvc_status" = "Pending" ]; then
+            pvc_event=""
+            pvc_event=$(kubectl get events -n "$namespace" \
+                --field-selector "involvedObject.kind=PersistentVolumeClaim,involvedObject.name=$pvc_name" \
+                --sort-by=.lastTimestamp \
+                -o jsonpath='{.items[-1:].reason}' 2>/dev/null || echo "")
+            if [ "$pvc_event" = "WaitForFirstConsumer" ]; then
+                return 0
+            fi
+        fi
+
+        # Check if resize completed
+        current_size=""
+        current_size=$(kubectl get pvc "$pvc_name" -n "$namespace" -o jsonpath='{.status.capacity.storage}' 2>/dev/null || echo "0Gi")
+
+        if [ "$VERBOSE" = "true" ]; then
+            log_info "PVC '$pvc_name' current size: $current_size, target: $new_size"
+        fi
+        if [ "$(parse_size_to_gib "$current_size")" -ge "$(parse_size_to_gib "$new_size")" ]; then
+            return 0
+        fi
+
+        # Check for FileSystemResizePending condition
+        resize_pending=""
+        resize_pending=$(kubectl get pvc "$pvc_name" -n "$namespace" \
+            -o jsonpath='{.status.conditions[?(@.type=="FileSystemResizePending")].status}' 2>/dev/null || echo "")
+        if [ "$resize_pending" = "True" ]; then
+            log_info "Volume resized. Remounting application to finalize."
+ return 0 + fi + + # Check timeout + current_time=$(date +%s) + if [ "$current_time" -ge "$deadline" ]; then + log_error "Timed out after ${PVC_RESIZE_TIMEOUT}s waiting for PVC '$pvc_name' to reach $new_size" + return 1 + fi + + log_info "Waiting for volume '$pvc_name'..." + sleep "$PVC_RESIZE_POLL_INTERVAL" + done +} + +# ============================================================================== +# Deployment Functions +# ============================================================================== + +deploy_common_artifacts() { + + if ! kube_apply "$CLOUDNESS_DEPLOY_YAML_COMMON"; then + log_error "Failed to set up prerequisite artifacts" + return 1 + fi + + log_step "Prerequisite artifacts configured" + return 0 +} + +deploy_volume() { + if [ "$CLOUDNESS_DEPLOY_FLAG_HAS_VOLUME" -ne 1 ]; then + return 0 + fi + + # Handle remount for volume resize + if [ "$CLOUDNESS_DEPLOY_FLAG_NEED_REMOUNT" -eq 1 ]; then + log_info "Volume resize detected, removing statefulset for remount..." + if ! kube_delete "statefulset" "$CLOUDNESS_DEPLOY_APP_IDENTIFIER" "$CLOUDNESS_DEPLOY_APP_NAMESPACE"; then + log_error "Failed to remove statefulset for remount" + return 1 + fi + fi + + # Apply volume configuration + if ! kube_apply "$CLOUDNESS_DEPLOY_YAML_VOLUME"; then + log_error "Failed to apply volume configuration" + return 1 + fi + + # Wait for each PVC to be ready (read from file) + pvc_data="" + pvc_data=$(yq -r 'select(.kind == "PersistentVolumeClaim") | .metadata.name + " " + .spec.resources.requests.storage' "$CLOUDNESS_DEPLOY_YAML_VOLUME" 2>/dev/null || echo "") + + echo "$pvc_data" | while read -r pvc_name new_size; do + # Skip empty lines + pvc_name=$(echo "$pvc_name" | xargs) + new_size=$(echo "$new_size" | xargs) + if [ -z "$pvc_name" ] || [ -z "$new_size" ] || [ "$pvc_name" = "---" ]; then + continue + fi + + if ! 
kube_wait_pvc_resize "$pvc_name" "$new_size"; then
+            log_error "Failed to provision PVC '$pvc_name'"
+            return 1
+        fi
+    done || return 1  # the pipe runs the loop in a subshell; propagate its failure
+
+    log_step "Volumes provisioned"
+    return 0
+}
+
+deploy_application() {
+    if ! kube_apply "$CLOUDNESS_DEPLOY_YAML_APP"; then
+        log_error "Failed to deploy application"
+        return 1
+    fi
+
+    if ! kube_rollout_status; then
+        return 1
+    fi
+
+    log_step "Application deployed"
+    return 0
+}
+
+deploy_routes() {
+    if [ "$CLOUDNESS_DEPLOY_FLAG_HAS_ROUTE" -ne 1 ]; then
+        return 0
+    fi
+
+    if ! kube_apply "$CLOUDNESS_DEPLOY_YAML_ROUTE"; then
+        log_error "Failed to configure HTTP routes"
+        return 1
+    fi
+
+    log_step "HTTP routes configured"
+    return 0
+}
+
+# ==============================================================================
+# Lifecycle Management
+# ==============================================================================
+
+cleanup() {
+    if [ "$CLEANUP_DONE" = "true" ]; then
+        return 0
+    fi
+    CLEANUP_DONE=true
+
+    log_info "Running cleanup..."
+
+    opposite_type=""
+    opposite_type=$(get_opposite_resource_type)
+
+    kube_delete "$opposite_type" "$CLOUDNESS_DEPLOY_APP_IDENTIFIER" "$CLOUDNESS_DEPLOY_APP_NAMESPACE" || true
+}
+
+on_exit() {
+    exit_code=$?
+    if [ $exit_code -ne 0 ]; then
+        log_error "Deployment failed with exit code $exit_code"
+    fi
+    cleanup
+    exit $exit_code
+}
+
+# ==============================================================================
+# Main Entrypoint
+# ==============================================================================
+
+main() {
+    # Set up exit trap
+    trap on_exit EXIT
+
+    print_section "Deploying application"
+
+    # Validate prerequisites
+    if ! validate_dependencies; then
+        exit 1
+    fi
+
+    if ! validate_environment; then
+        exit 1
+    fi
+
+    # Execute deployment steps
+    if ! deploy_common_artifacts; then
+        exit 1
+    fi
+
+    if ! deploy_volume; then
+        exit 1
+    fi
+
+    if ! deploy_application; then
+        exit 1
+    fi
+
+    if ! 
deploy_routes; then + exit 1 + fi + + log_success "Deployment completed successfully!" +} + +# Run main function +main "$@" diff --git a/plugins/helper/scripts/init-script.sh b/plugins/helper/scripts/init-script.sh new file mode 100644 index 0000000..baeab36 --- /dev/null +++ b/plugins/helper/scripts/init-script.sh @@ -0,0 +1,105 @@ +#!/bin/sh + +# Cloudness Git Clone Script +# This script handles git repository cloning for the build process + +set -eu + +# ============================================================================== +# Configuration +# ============================================================================== + +: "${CLOUDNESS_GIT_REPO_URL:=}" +: "${CLOUDNESS_GIT_BRANCH:=}" +: "${CLOUDNESS_GIT_COMMIT:=}" +: "${CLOUDNESS_BUILD_PATH:=}" + +# Optional netrc credentials +: "${GIT_MACHINE:=}" +: "${GIT_LOGIN:=}" +: "${GIT_PASSWORD:=}" + +# ============================================================================== +# Validation +# ============================================================================== + +validate_inputs() { + has_errors=0 + + if [ -z "$CLOUDNESS_GIT_REPO_URL" ]; then + log_error "CLOUDNESS_GIT_REPO_URL is required" + has_errors=1 + fi + + if [ -z "$CLOUDNESS_GIT_BRANCH" ]; then + log_error "CLOUDNESS_GIT_BRANCH is required" + has_errors=1 + fi + + if [ -z "$CLOUDNESS_BUILD_PATH" ]; then + log_error "CLOUDNESS_BUILD_PATH is required" + has_errors=1 + fi + + if [ "$has_errors" -eq 1 ]; then + return 1 + fi + + return 0 +} + +# ============================================================================== +# Git Operations +# ============================================================================== + +setup_netrc() { + if [ -n "$GIT_MACHINE" ] && [ -n "$GIT_LOGIN" ] && [ -n "$GIT_PASSWORD" ]; then + log_info "Configuring git credentials..." 
+        echo "machine $GIT_MACHINE login $GIT_LOGIN password $GIT_PASSWORD" > ~/.netrc
+        chmod 600 ~/.netrc
+    fi
+}
+
+clone_repository() {
+    print_section "Cloning Repository"
+
+    log_info "Repository: $CLOUDNESS_GIT_REPO_URL"
+    log_info "Branch: $CLOUDNESS_GIT_BRANCH"
+
+    if ! git clone "$CLOUDNESS_GIT_REPO_URL" --branch "$CLOUDNESS_GIT_BRANCH" "$CLOUDNESS_BUILD_PATH"; then
+        log_error "Failed to clone repository"
+        return 1
+    fi
+
+    # Checkout specific commit if provided
+    if [ -n "$CLOUDNESS_GIT_COMMIT" ]; then
+        log_info "Checking out commit: $CLOUDNESS_GIT_COMMIT"
+        git -C "$CLOUDNESS_BUILD_PATH" config advice.detachedHead false
+        if ! git -C "$CLOUDNESS_BUILD_PATH" checkout "$CLOUDNESS_GIT_COMMIT"; then
+            log_error "Failed to checkout commit $CLOUDNESS_GIT_COMMIT"
+            return 1
+        fi
+    fi
+
+    return 0
+}
+
+# ==============================================================================
+# Main
+# ==============================================================================
+
+main() {
+    if ! validate_inputs; then
+        exit 1
+    fi
+
+    setup_netrc
+
+    if ! clone_repository; then
+        exit 1
+    fi
+
+    log_success "Repository cloned successfully!"
+}
+
+main "$@"
diff --git a/runbooks/dev/localbuilder.md b/runbooks/dev/localbuilder.md
new file mode 100644
index 0000000..da3573d
--- /dev/null
+++ b/runbooks/dev/localbuilder.md
@@ -0,0 +1,10 @@
+## Quick Start (All-in-One)
+
+```bash
+# Run from plugins directory
+cd plugins/helper
+export IMAGE_TAG="1.0.0" && \
+  docker build -f Dockerfile -t cloudnessio/helper:${IMAGE_TAG} . 
&& \
+  sudo docker save cloudnessio/helper:${IMAGE_TAG} | \
+  sudo ctr -a /run/k3s/containerd/containerd.sock -n=k8s.io images import -
+```
diff --git a/scripts/install/cloudness-runner-rbac.yaml b/scripts/install/cloudness-runner-rbac.yaml
index e9b78cd..f807065 100644
--- a/scripts/install/cloudness-runner-rbac.yaml
+++ b/scripts/install/cloudness-runner-rbac.yaml
@@ -4,20 +4,22 @@ metadata:
   name: cloudness-runner-role
   namespace: cloudness
 rules:
-  - apiGroups: [''] # '' indicates core apiGroup (don't remove)
-    resources: ['namespaces','serviceaccounts','persistentvolumeclaims','events']
-    verbs: ["get", "list", "create", "patch", "delete"]
+  - apiGroups: [""] # '' indicates core apiGroup (don't remove)
+    resources:
+      ["namespaces", "serviceaccounts", "persistentvolumeclaims", "events"]
+    verbs: ["get", "list", "create", "patch", "delete", "watch"]
   - apiGroups: [""]
-    resources: ["pods", "services", "endpoints", "configmaps", "secrets"]
+    resources: ["pods", "services", "endpoints", "configmaps", "secrets"]
     verbs: ["get", "list", "create", "patch", "delete"]
-  - apiGroups: ['apps'] # '' indicates core apiGroup (don't remove)
-    resources: ['statefulsets', 'deployments', 'replicasets','controllerrevisions']
+  - apiGroups: ["apps"] # workload controllers (Deployments, StatefulSets, ...)
+    resources:
+      ["statefulsets", "deployments", "replicasets", "controllerrevisions"]
     verbs: ["get", "list", "create", "patch", "watch", "delete"]
   - apiGroups: ["autoscaling"]
     resources: ["horizontalpodautoscalers"]
     verbs: ["get", "list", "create", "patch", "delete"]
   - apiGroups: ["rbac.authorization.k8s.io"]
-    resources: ["roles","rolebindings"]
+    resources: ["roles", "rolebindings"]
     verbs: ["get", "list", "create", "patch", "delete"]
   - apiGroups: ["gateway.networking.k8s.io"]
     resources: ["httproutes"]
@@ -43,4 +45,4 @@ subjects:
 roleRef:
   kind: ClusterRole
   name: cloudness-runner-role
-  apiGroup: rbac.authorization.k8s.io
\ No newline at end of file
+  apiGroup: rbac.authorization.k8s.io
diff 
--git a/scripts/install/install.sh b/scripts/install/install.sh index 9f0126b..ba4d9a3 100755 --- a/scripts/install/install.sh +++ b/scripts/install/install.sh @@ -9,7 +9,7 @@ set -e RED='\033[0;31m' GREEN='\033[0;32m' YELLOW='\033[1;33m' -BLUE='\033[0;34m' +BLUE='\033[38;2;40;153;245m' NC='\033[0m' # No Color # Configuration