diff --git a/cmd/go.mod b/cmd/go.mod index ffaba24..7dc284d 100644 --- a/cmd/go.mod +++ b/cmd/go.mod @@ -5,6 +5,7 @@ go 1.25.3 replace github.com/posit-dev/ptd/lib => ../lib require ( + github.com/BurntSushi/toml v1.6.0 github.com/charmbracelet/log v0.4.2 github.com/posit-dev/ptd/lib v0.0.0-00010101000000-000000000000 github.com/spf13/cobra v1.9.1 @@ -28,7 +29,6 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect - github.com/BurntSushi/toml v1.5.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProtonMail/go-crypto v1.3.0 // indirect github.com/agext/levenshtein v1.2.3 // indirect diff --git a/cmd/go.sum b/cmd/go.sum index 890e1e1..9301103 100644 --- a/cmd/go.sum +++ b/cmd/go.sum @@ -36,8 +36,8 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= -github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= -github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk= +github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Microsoft/go-winio v0.5.2/go.mod 
h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= diff --git a/cmd/internal/verify/cleanup.go b/cmd/internal/verify/cleanup.go new file mode 100644 index 0000000..f58fab5 --- /dev/null +++ b/cmd/internal/verify/cleanup.go @@ -0,0 +1,149 @@ +package verify + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "strings" + "time" +) + +// CleanupCredentials deletes VIP test credentials and resources. +func CleanupCredentials(ctx context.Context, env []string, namespace, connectURL string) error { + // Read vip-test-credentials Secret to get the Connect API key and key name + cmd := exec.CommandContext(ctx, "kubectl", "get", "secret", vipTestCredentialsSecret, + "-n", namespace, + "-o", "json") + cmd.Env = env + + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + // Secret doesn't exist, nothing to clean up + if strings.Contains(string(exitErr.Stderr), "not found") { + fmt.Fprintf(os.Stderr, "No credentials secret found, nothing to clean up\n") + return nil + } + return fmt.Errorf("failed to get credentials secret: %s", string(exitErr.Stderr)) + } + return fmt.Errorf("failed to get credentials secret: %w", err) + } + + var secret struct { + Data map[string]string `json:"data"` + } + if err := json.Unmarshal(output, &secret); err != nil { + return fmt.Errorf("failed to parse secret: %w", err) + } + + // Extract and decode the Connect API key and key name + apiKeyB64, hasAPIKey := secret.Data["VIP_CONNECT_API_KEY"] + keyNameB64, hasKeyName := secret.Data["VIP_CONNECT_KEY_NAME"] + + if hasAPIKey && hasKeyName && connectURL != "" { + // Decode base64 values + apiKeyBytes, err := base64.StdEncoding.DecodeString(apiKeyB64) + if err != nil { + return fmt.Errorf("failed to decode API key: %w", err) + } + apiKey := string(apiKeyBytes) + + keyNameBytes, err := base64.StdEncoding.DecodeString(keyNameB64) + if err != nil { + return fmt.Errorf("failed to decode key name: %w", err) + 
} + keyName := string(keyNameBytes) + + // Delete the Connect API key via the Connect API + if err := deleteConnectAPIKey(ctx, connectURL, apiKey, keyName); err != nil { + fmt.Fprintf(os.Stderr, "Warning: failed to delete Connect API key: %v\n", err) + // Continue with cleanup even if API key deletion fails + } else { + fmt.Fprintf(os.Stderr, "Deleted Connect API key: %s\n", keyName) + } + } + + // Delete the vip-test-credentials K8s Secret + deleteCmd := exec.CommandContext(ctx, "kubectl", "delete", "secret", vipTestCredentialsSecret, + "-n", namespace, + "--ignore-not-found") + deleteCmd.Env = env + + if err := deleteCmd.Run(); err != nil { + return fmt.Errorf("failed to delete credentials secret: %w", err) + } + + fmt.Fprintf(os.Stderr, "Deleted credentials secret: %s\n", vipTestCredentialsSecret) + return nil +} + +// deleteConnectAPIKey deletes a Connect API key by name using the Connect API. +func deleteConnectAPIKey(ctx context.Context, connectURL, apiKey, keyName string) error { + client := &http.Client{Timeout: 30 * time.Second} + + // GET all API keys for the user + listURL := fmt.Sprintf("%s/__api__/v1/user/api_keys", connectURL) + req, err := http.NewRequestWithContext(ctx, "GET", listURL, nil) + if err != nil { + return err + } + req.Header.Set("Authorization", "Key "+apiKey) + + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("list API keys failed with status %d: %s", resp.StatusCode, string(body)) + } + + var apiKeys []struct { + ID string `json:"id"` + Name string `json:"name"` + } + if err := json.NewDecoder(resp.Body).Decode(&apiKeys); err != nil { + return fmt.Errorf("failed to parse API keys response: %w", err) + } + + // Find the key by name + var keyID string + for _, key := range apiKeys { + if key.Name == keyName { + keyID = key.ID + break + } + } + + if keyID == "" { + return fmt.Errorf("API key with name 
%q not found", keyName) + } + + // DELETE the API key by ID + deleteURL := fmt.Sprintf("%s/__api__/v1/user/api_keys/%s", connectURL, keyID) + req, err = http.NewRequestWithContext(ctx, "DELETE", deleteURL, nil) + if err != nil { + return err + } + req.Header.Set("Authorization", "Key "+apiKey) + + resp, err = client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("delete API key failed with status %d: %s", resp.StatusCode, string(body)) + } + + return nil +} diff --git a/cmd/internal/verify/config.go b/cmd/internal/verify/config.go new file mode 100644 index 0000000..64e885b --- /dev/null +++ b/cmd/internal/verify/config.go @@ -0,0 +1,175 @@ +package verify + +import ( + "bytes" + "fmt" + + "github.com/BurntSushi/toml" +) + +// VIPConfig represents the vip.toml configuration structure +type VIPConfig struct { + General GeneralConfig `toml:"general"` + Connect ProductConfig `toml:"connect"` + Workbench ProductConfig `toml:"workbench"` + PackageManager ProductConfig `toml:"package_manager"` + Auth AuthConfig `toml:"auth"` + Email DisableableConfig `toml:"email"` + Monitoring DisableableConfig `toml:"monitoring"` + Security SecurityConfig `toml:"security"` +} + +type GeneralConfig struct { + DeploymentName string `toml:"deployment_name"` +} + +type ProductConfig struct { + Enabled bool `toml:"enabled"` + URL string `toml:"url,omitempty"` +} + +type AuthConfig struct { + Provider string `toml:"provider"` +} + +type DisableableConfig struct { + Enabled bool `toml:"enabled"` +} + +type SecurityConfig struct { + PolicyChecksEnabled bool `toml:"policy_checks_enabled"` +} + +// SiteCR represents the Kubernetes Site custom resource +type SiteCR struct { + Spec SiteSpec `yaml:"spec"` +} + +type SiteSpec struct { + Domain string `yaml:"domain"` + Connect *ProductSpec `yaml:"connect,omitempty"` + Workbench *ProductSpec 
`yaml:"workbench,omitempty"` + PackageManager *ProductSpec `yaml:"packageManager,omitempty"` + Keycloak *KeycloakSpec `yaml:"keycloak,omitempty"` +} + +type ProductSpec struct { + DomainPrefix string `yaml:"domainPrefix,omitempty"` + // BaseDomain is the bare parent domain for this product (e.g. "example.com"). + // It must NOT include the product subdomain; buildProductURL always prepends the + // product prefix (DomainPrefix or the default) to form the final URL. + // For example, BaseDomain="example.com" with default prefix "connect" yields + // "https://connect.example.com". + BaseDomain string `yaml:"baseDomain,omitempty"` + Auth *AuthSpec `yaml:"auth,omitempty"` +} + +type AuthSpec struct { + Type string `yaml:"type"` +} + +type KeycloakSpec struct { + Enabled bool `yaml:"enabled"` +} + +// GenerateConfig generates a vip.toml configuration from a parsed Site CR +func GenerateConfig(site *SiteCR, targetName string) (string, error) { + if site == nil { + return "", fmt.Errorf("site cannot be nil") + } + + needsDomain := (site.Spec.Connect != nil && site.Spec.Connect.BaseDomain == "") || + (site.Spec.Workbench != nil && site.Spec.Workbench.BaseDomain == "") || + (site.Spec.PackageManager != nil && site.Spec.PackageManager.BaseDomain == "") + if site.Spec.Domain == "" && needsDomain { + return "", fmt.Errorf("site domain is required when products are configured without a per-product baseDomain") + } + + config := VIPConfig{ + General: GeneralConfig{ + DeploymentName: targetName, + }, + Email: DisableableConfig{ + Enabled: false, + }, + Monitoring: DisableableConfig{ + Enabled: false, + }, + Security: SecurityConfig{ + PolicyChecksEnabled: false, + }, + } + + // Determine auth provider. PackageManager is intentionally excluded: it does not + // support authentication types that VIP tests against, so its auth spec is not consulted. 
+ authProvider := "oidc" // default + if site.Spec.Connect != nil && site.Spec.Connect.Auth != nil && site.Spec.Connect.Auth.Type != "" { + authProvider = site.Spec.Connect.Auth.Type + } else if site.Spec.Workbench != nil && site.Spec.Workbench.Auth != nil && site.Spec.Workbench.Auth.Type != "" { + authProvider = site.Spec.Workbench.Auth.Type + } + config.Auth = AuthConfig{Provider: authProvider} + + // Configure Connect + if site.Spec.Connect != nil { + productURL := buildProductURL(site.Spec.Connect, "connect", site.Spec.Domain) + config.Connect = ProductConfig{ + Enabled: true, + URL: productURL, + } + } else { + config.Connect = ProductConfig{Enabled: false} + } + + // Configure Workbench + if site.Spec.Workbench != nil { + productURL := buildProductURL(site.Spec.Workbench, "workbench", site.Spec.Domain) + config.Workbench = ProductConfig{ + Enabled: true, + URL: productURL, + } + } else { + config.Workbench = ProductConfig{Enabled: false} + } + + // Configure Package Manager + if site.Spec.PackageManager != nil { + productURL := buildProductURL(site.Spec.PackageManager, "packagemanager", site.Spec.Domain) + config.PackageManager = ProductConfig{ + Enabled: true, + URL: productURL, + } + } else { + config.PackageManager = ProductConfig{Enabled: false} + } + + // Encode to TOML + var buf bytes.Buffer + encoder := toml.NewEncoder(&buf) + if err := encoder.Encode(config); err != nil { + return "", fmt.Errorf("failed to encode TOML: %w", err) + } + + return buf.String(), nil +} + +// buildProductURL constructs the product URL from the product spec. +// The prefix (DomainPrefix or defaultPrefix) is always prepended to the domain, so +// ProductSpec.BaseDomain must be a bare parent domain (e.g. "example.com"), not a +// fully-qualified hostname that already includes the product subdomain. 
+func buildProductURL(spec *ProductSpec, defaultPrefix, baseDomain string) string { + if spec == nil { + return fmt.Sprintf("https://%s.%s", defaultPrefix, baseDomain) + } + prefix := defaultPrefix + if spec.DomainPrefix != "" { + prefix = spec.DomainPrefix + } + + domain := baseDomain + if spec.BaseDomain != "" { + domain = spec.BaseDomain + } + + return fmt.Sprintf("https://%s.%s", prefix, domain) +} diff --git a/cmd/internal/verify/credentials.go b/cmd/internal/verify/credentials.go new file mode 100644 index 0000000..dacd967 --- /dev/null +++ b/cmd/internal/verify/credentials.go @@ -0,0 +1,135 @@ +package verify + +import ( + "context" + "encoding/json" + "fmt" + "os/exec" + "strings" +) + +// MintConnectKey calls the VIP CLI to mint a Connect API key via interactive browser auth. +// Returns the API key and key name from the VIP CLI output. +func MintConnectKey(ctx context.Context, connectURL string) (apiKey string, keyName string, err error) { + // Check if vip CLI is available + if _, err := exec.LookPath("vip"); err != nil { + return "", "", fmt.Errorf("VIP CLI not found. 
Install with: pip install /path/to/vip") + } + + cmd := exec.CommandContext(ctx, "vip", "auth", "mint-connect-key", "--url", connectURL) + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return "", "", fmt.Errorf("vip auth mint-connect-key failed: %s", string(exitErr.Stderr)) + } + return "", "", fmt.Errorf("vip auth mint-connect-key failed: %w", err) + } + + // Parse JSON output: {"api_key": "...", "key_name": "..."} + var result struct { + APIKey string `json:"api_key"` + KeyName string `json:"key_name"` + } + if err := json.Unmarshal(output, &result); err != nil { + return "", "", fmt.Errorf("failed to parse vip CLI output: %w", err) + } + + if result.APIKey == "" || result.KeyName == "" { + return "", "", fmt.Errorf("vip CLI returned empty api_key or key_name") + } + + return result.APIKey, result.KeyName, nil +} + +// GenerateWorkbenchToken generates a Workbench API token via kubectl exec. +func GenerateWorkbenchToken(ctx context.Context, env []string, namespace, siteName, username string) (string, error) { + deploymentName := fmt.Sprintf("workbench-%s", siteName) + cmd := exec.CommandContext(ctx, "kubectl", "exec", + fmt.Sprintf("deploy/%s", deploymentName), + "-n", namespace, + "--", + "rstudio-server", "generate-api-token", "user", "vip-test", username) + cmd.Env = env + + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return "", fmt.Errorf("kubectl exec generate-api-token failed: %s", string(exitErr.Stderr)) + } + return "", fmt.Errorf("kubectl exec generate-api-token failed: %w", err) + } + + return strings.TrimSpace(string(output)), nil +} + +// GeneratePackageManagerToken generates a PM token via kubectl exec. 
+func GeneratePackageManagerToken(ctx context.Context, env []string, namespace, siteName string) (string, error) { + deploymentName := fmt.Sprintf("package-manager-%s", siteName) + cmd := exec.CommandContext(ctx, "kubectl", "exec", + fmt.Sprintf("deploy/%s", deploymentName), + "-n", namespace, + "--", + "rspm", "create", "token", "--scope=repos:read", "--quiet") + cmd.Env = env + + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return "", fmt.Errorf("kubectl exec rspm create token failed: %s", string(exitErr.Stderr)) + } + return "", fmt.Errorf("kubectl exec rspm create token failed: %w", err) + } + + return strings.TrimSpace(string(output)), nil +} + +// SaveCredentialsSecret creates or updates the vip-test-credentials K8s Secret with all tokens. +func SaveCredentialsSecret(ctx context.Context, env []string, namespace string, creds map[string]string) error { + // Build kubectl create secret generic command + args := []string{"create", "secret", "generic", vipTestCredentialsSecret, "-n", namespace} + for key, value := range creds { + args = append(args, fmt.Sprintf("--from-literal=%s=%s", key, value)) + } + args = append(args, "--dry-run=client", "-o", "json") + + // Generate the secret manifest + cmd := exec.CommandContext(ctx, "kubectl", args...) 
+ cmd.Env = env + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return fmt.Errorf("kubectl create secret failed: %s", string(exitErr.Stderr)) + } + return fmt.Errorf("kubectl create secret failed: %w", err) + } + + // Parse the generated secret to add labels + var secret map[string]interface{} + if err := json.Unmarshal(output, &secret); err != nil { + return fmt.Errorf("failed to parse secret manifest: %w", err) + } + + // Add labels + metadata := secret["metadata"].(map[string]interface{}) + metadata["labels"] = map[string]string{ + "app.kubernetes.io/managed-by": "ptd", + "app.kubernetes.io/name": "vip-verify", + } + + // Marshal back to JSON + secretJSON, err := json.Marshal(secret) + if err != nil { + return fmt.Errorf("failed to marshal secret: %w", err) + } + + // Apply the secret + applyCmd := exec.CommandContext(ctx, "kubectl", "apply", "-f", "-", "-n", namespace) + applyCmd.Env = env + applyCmd.Stdin = strings.NewReader(string(secretJSON)) + + if output, err := applyCmd.CombinedOutput(); err != nil { + return fmt.Errorf("kubectl apply secret failed: %s", string(output)) + } + + return nil +} diff --git a/cmd/internal/verify/job.go b/cmd/internal/verify/job.go new file mode 100644 index 0000000..c0ca78d --- /dev/null +++ b/cmd/internal/verify/job.go @@ -0,0 +1,481 @@ +package verify + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "log/slog" + "os" + "os/exec" + "strings" + "time" +) + +// JobOptions contains options for creating a VIP verification Job +type JobOptions struct { + Image string + Categories string + JobName string + ConfigName string + Namespace string + CredentialsAvailable bool // whether vip-test-credentials Secret exists + InteractiveAuth bool // whether using interactive auth (API tokens) vs Keycloak (username/password) + Timeout time.Duration +} + +// CreateConfigMap creates a Kubernetes ConfigMap with the vip.toml configuration +func CreateConfigMap(ctx context.Context, 
env []string, configName string, config string, namespace string) error { + // Create a temporary file with the config + tmpfile, err := os.CreateTemp("", "vip-config-*.toml") + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + defer os.Remove(tmpfile.Name()) + + if _, err := tmpfile.WriteString(config); err != nil { + tmpfile.Close() + return fmt.Errorf("failed to write config to temp file: %w", err) + } + tmpfile.Close() + + // Create ConfigMap from the file + cmd := exec.CommandContext(ctx, "kubectl", "create", "configmap", configName, + "--from-file=vip.toml="+tmpfile.Name(), + "-n", namespace, + "--dry-run=client", + "-o", "yaml") + cmd.Env = env + + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return fmt.Errorf("kubectl create configmap failed: %s", string(exitErr.Stderr)) + } + return fmt.Errorf("kubectl create configmap failed: %w", err) + } + + // Apply the ConfigMap + applyCmd := exec.CommandContext(ctx, "kubectl", "apply", "-f", "-", "-n", namespace) + applyCmd.Env = env + applyCmd.Stdin = strings.NewReader(string(output)) + + if output, err := applyCmd.CombinedOutput(); err != nil { + return fmt.Errorf("kubectl apply configmap failed: %s", string(output)) + } + + return nil +} + +// buildJobSpec constructs the Kubernetes Job spec as a map ready for JSON marshalling. +// Note: unlike the local run (which passes --config ), the Job does not pass +// --config explicitly. VIP defaults to reading /app/vip.toml, which is exactly where +// the ConfigMap is mounted (see volumeMounts below). If VIP ever changes its default +// config path, this assumption will silently break; update the mountPath accordingly. 
+func buildJobSpec(opts JobOptions) map[string]interface{} { + args := []string{"--tb=short", "-v"} + if opts.Categories != "" { + args = append(args, "-m", opts.Categories) + } + + // Derive activeDeadlineSeconds from the CLI timeout with a 60-second buffer so the + // pod is killed before the outer poll times out. Fall back to 600s if not set. + activeDeadlineSeconds := int64(600) + if opts.Timeout > 0 { + derived := int64(opts.Timeout.Seconds()) - 60 + if derived < 60 { + derived = 60 + } + activeDeadlineSeconds = derived + } + backoffLimit := int32(0) + + container := map[string]interface{}{ + "name": "vip", + "image": opts.Image, + "args": args, + "volumeMounts": []map[string]interface{}{ + { + "name": "config", + "mountPath": "/app/vip.toml", + "subPath": "vip.toml", + }, + }, + } + if opts.CredentialsAvailable { + if opts.InteractiveAuth { + // Interactive auth mode: use API tokens from Secret + container["env"] = []map[string]interface{}{ + { + "name": "VIP_CONNECT_API_KEY", + "valueFrom": map[string]interface{}{ + "secretKeyRef": map[string]string{ + "name": "vip-test-credentials", + "key": "VIP_CONNECT_API_KEY", + }, + }, + }, + { + "name": "VIP_WORKBENCH_API_KEY", + "valueFrom": map[string]interface{}{ + "secretKeyRef": map[string]string{ + "name": "vip-test-credentials", + "key": "VIP_WORKBENCH_API_KEY", + }, + }, + }, + { + "name": "VIP_PM_TOKEN", + "valueFrom": map[string]interface{}{ + "secretKeyRef": map[string]string{ + "name": "vip-test-credentials", + "key": "VIP_PM_TOKEN", + }, + }, + }, + } + } else { + // Keycloak mode: use username/password from Secret + container["env"] = []map[string]interface{}{ + { + "name": "VIP_TEST_USERNAME", + "valueFrom": map[string]interface{}{ + "secretKeyRef": map[string]string{ + "name": "vip-test-credentials", + "key": "username", + }, + }, + }, + { + "name": "VIP_TEST_PASSWORD", + "valueFrom": map[string]interface{}{ + "secretKeyRef": map[string]string{ + "name": "vip-test-credentials", + "key": "password", + 
}, + }, + }, + } + } + } + + return map[string]interface{}{ + "apiVersion": "batch/v1", + "kind": "Job", + "metadata": map[string]interface{}{ + "name": opts.JobName, + "namespace": opts.Namespace, + "labels": map[string]string{ + "app.kubernetes.io/name": "vip-verify", + "app.kubernetes.io/managed-by": "ptd", + }, + }, + "spec": map[string]interface{}{ + "backoffLimit": backoffLimit, + "activeDeadlineSeconds": activeDeadlineSeconds, + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "restartPolicy": "Never", + "containers": []map[string]interface{}{container}, + "volumes": []map[string]interface{}{ + { + "name": "config", + "configMap": map[string]string{ + "name": opts.ConfigName, + }, + }, + }, + }, + }, + }, + } +} + +// CreateJob creates a Kubernetes Job for running VIP tests. +// Uses JSON serialization to prevent YAML injection via user-controlled fields. +func CreateJob(ctx context.Context, env []string, opts JobOptions) error { + job := buildJobSpec(opts) + + jobJSON, err := json.Marshal(job) + if err != nil { + return fmt.Errorf("failed to marshal job spec: %w", err) + } + + cmd := exec.CommandContext(ctx, "kubectl", "apply", "-f", "-", "-n", opts.Namespace) + cmd.Env = env + cmd.Stdin = strings.NewReader(string(jobJSON)) + + if output, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("kubectl apply job failed: %s", string(output)) + } + + return nil +} + +// errImagePull is returned by StreamLogs when the pod cannot start due to a permanent +// image pull failure (ImagePullBackOff or ErrImagePull). Callers should treat this as +// a fatal error and abort without waiting for the Job to complete. +var errImagePull = errors.New("image pull failed") + +// waitForPodRunning waits for the pod to leave Pending/Init state before streaming logs. +// This avoids a spurious "unexpected pod phase Pending" warning when kubectl logs is +// called immediately after the pod object is created but before the container starts. 
+func waitForPodRunning(ctx context.Context, env []string, podName, namespace string, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + ticker := time.NewTicker(2 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout waiting for pod to start") + case <-ticker.C: + phase, err := getPodPhase(ctx, env, podName, namespace) + if err != nil { + continue + } + if phase == "Running" || phase == "Succeeded" || phase == "Failed" { + return nil + } + } + } +} + +// getPodWaitingReason returns the waiting reason for the first container in the pod. +// Returns an empty string if the pod is not in a waiting state or on any error. +func getPodWaitingReason(ctx context.Context, env []string, podName, namespace string) string { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + cmd := exec.CommandContext(ctx, "kubectl", "get", "pod", podName, + "-n", namespace, + "-o", "jsonpath={.status.containerStatuses[0].state.waiting.reason}") + cmd.Env = env + + output, err := cmd.Output() + if err != nil { + return "" + } + return strings.TrimSpace(string(output)) +} + +// StreamLogs follows the logs of the Job pod. timeout is the overall job timeout and is +// used to scale the pod-start wait; pass 0 to use the default 60-second pod-start wait. +func StreamLogs(ctx context.Context, env []string, jobName string, namespace string, timeout time.Duration) error { + // Wait for pod to be created (timeout after 30 seconds) + podName, err := waitForPod(ctx, env, jobName, 30*time.Second, namespace) + if err != nil { + return err + } + + // Scale the pod-start timeout from the overall job timeout, capped at 5 minutes. 
+ podStartTimeout := 60 * time.Second + if timeout > 0 { + if t := timeout / 4; t < 5*time.Minute { + podStartTimeout = t + } else { + podStartTimeout = 5 * time.Minute + } + } + + // Wait for the container to start before streaming to avoid spurious warnings + // when kubectl logs is called while the pod is still in Pending/Init state. + if err := waitForPodRunning(ctx, env, podName, namespace, podStartTimeout); err != nil { + // Detect permanent image pull failures and surface them immediately rather + // than silently proceeding to stream logs and waiting for the full job timeout. + checkCtx, checkCancel := context.WithTimeout(context.Background(), 5*time.Second) + reason := getPodWaitingReason(checkCtx, env, podName, namespace) + checkCancel() + if reason == "ImagePullBackOff" || reason == "ErrImagePull" { + return fmt.Errorf("%w: pod %s reason %s (check image name and pull credentials)", errImagePull, podName, reason) + } + slog.Warn("timed out waiting for pod to start; attempting log stream anyway", "pod", podName) + } + + // Stream the pod logs + cmd := exec.CommandContext(ctx, "kubectl", "logs", "-f", podName, "-n", namespace) + cmd.Env = env + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 1 { + // kubectl logs -f exits with code 1 when the pod has already completed, + // but also for network errors, RBAC failures, and pod eviction. + // Check the pod phase to distinguish normal completion from unexpected errors. + phase, phaseErr := getPodPhase(ctx, env, podName, namespace) + if phaseErr == nil && (phase == "Succeeded" || phase == "Failed") { + // Pod completed normally; log stream ended because the pod is done. 
+ return nil + } + if phaseErr != nil { + return fmt.Errorf("kubectl logs exited with code 1; could not determine pod phase: %w", phaseErr) + } + return fmt.Errorf("kubectl logs exited with code 1; unexpected pod phase %q", phase) + } + return fmt.Errorf("failed to stream logs: %w", err) + } + + return nil +} + +// getPodPhase returns the phase of a pod (e.g. "Running", "Succeeded", "Failed"). +func getPodPhase(ctx context.Context, env []string, podName, namespace string) (string, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + cmd := exec.CommandContext(ctx, "kubectl", "get", "pod", podName, + "-n", namespace, + "-o", "jsonpath={.status.phase}") + cmd.Env = env + + output, err := cmd.Output() + if err != nil { + return "", err + } + return strings.TrimSpace(string(output)), nil +} + +// jobLabelSelector returns the label selector for pods created by a Job. +// batch.kubernetes.io/job-name was introduced in Kubernetes 1.27; older clusters use +// the legacy "job-name" label. This probes the server version once to avoid issuing +// two kubectl calls per poll tick on pre-1.27 clusters. On any error it defaults to +// the modern label, which is correct for all current clusters. 
+func jobLabelSelector(env []string, jobName string) string { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + cmd := exec.CommandContext(ctx, "kubectl", "version", "-o", "json") + cmd.Env = env + + output, err := cmd.Output() + if err != nil { + return "batch.kubernetes.io/job-name=" + jobName + } + + var v struct { + ServerVersion struct { + Minor string `json:"minor"` + } `json:"serverVersion"` + } + if err := json.Unmarshal(output, &v); err != nil { + return "batch.kubernetes.io/job-name=" + jobName + } + + var minor int + fmt.Sscanf(v.ServerVersion.Minor, "%d", &minor) + if minor > 0 && minor < 27 { + return "job-name=" + jobName + } + return "batch.kubernetes.io/job-name=" + jobName +} + +// waitForPod waits for a pod associated with the job to be created +func waitForPod(ctx context.Context, env []string, jobName string, timeout time.Duration, namespace string) (string, error) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + // Probe the cluster label style once to avoid issuing two kubectl calls per tick + // on pre-1.27 clusters where the modern label is not present. 
+ label := jobLabelSelector(env, jobName) + + for { + select { + case <-ctx.Done(): + if ctx.Err() == context.DeadlineExceeded { + return "", fmt.Errorf("timeout waiting for pod to be created") + } + return "", fmt.Errorf("cancelled while waiting for pod to be created") + case <-ticker.C: + cmd := exec.CommandContext(ctx, "kubectl", "get", "pods", + "-n", namespace, + "-l", label, + "-o", "jsonpath={.items[0].metadata.name}") + cmd.Env = env + + output, err := cmd.Output() + if err == nil && len(output) > 0 { + return strings.TrimSpace(string(output)), nil + } + } + } +} + +// WaitForJob waits for the Job to complete and returns success status +func WaitForJob(ctx context.Context, env []string, jobName string, namespace string, timeout time.Duration) (bool, error) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + if ctx.Err() == context.DeadlineExceeded { + return false, fmt.Errorf("timeout waiting for job to complete") + } + return false, fmt.Errorf("cancelled while waiting for job to complete") + case <-ticker.C: + cmd := exec.CommandContext(ctx, "kubectl", "get", "job", jobName, + "-n", namespace, + "-o", "jsonpath={.status.conditions[?(@.type==\"Complete\")].status},{.status.conditions[?(@.type==\"Failed\")].status}") + cmd.Env = env + + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + slog.Warn("kubectl get job failed, retrying", "error", string(exitErr.Stderr)) + } else { + slog.Warn("kubectl get job failed, retrying", "error", err) + } + continue + } + + if done, success := parseJobStatus(string(output)); done { + return success, nil + } + } + } +} + +// parseJobStatus parses kubectl jsonpath output for job Complete/Failed conditions. +// The output format is "{Complete.status},{Failed.status}" where each is "True", "False", or empty. 
+// Using a comma separator prevents ambiguity when one condition is absent (outputs as empty string). +// Returns (done, success): done=true means the job has finished. +func parseJobStatus(output string) (done bool, success bool) { + parts := strings.SplitN(strings.TrimSpace(output), ",", 2) + if len(parts) >= 1 && parts[0] == "True" { + return true, true + } + if len(parts) >= 2 && parts[1] == "True" { + return true, false + } + return false, false +} + +// Cleanup removes the Job and ConfigMap +func Cleanup(ctx context.Context, env []string, jobName string, configName string, namespace string) error { + var errs []error + + // Delete job + jobCmd := exec.CommandContext(ctx, "kubectl", "delete", "job", jobName, "-n", namespace, "--ignore-not-found") + jobCmd.Env = env + if err := jobCmd.Run(); err != nil { + errs = append(errs, fmt.Errorf("failed to delete job: %w", err)) + } + + // Delete configmap (always attempt even if job deletion failed) + cmCmd := exec.CommandContext(ctx, "kubectl", "delete", "configmap", configName, "-n", namespace, "--ignore-not-found") + cmCmd.Env = env + if err := cmCmd.Run(); err != nil { + errs = append(errs, fmt.Errorf("failed to delete configmap: %w", err)) + } + + return errors.Join(errs...) +} diff --git a/cmd/internal/verify/keycloak.go b/cmd/internal/verify/keycloak.go new file mode 100644 index 0000000..2c570a7 --- /dev/null +++ b/cmd/internal/verify/keycloak.go @@ -0,0 +1,364 @@ +package verify + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "log/slog" + "math/big" + "net/http" + "net/url" + "os/exec" + "strings" + "time" +) + +const keycloakHTTPTimeout = 30 * time.Second + +// vipTestCredentialsSecret is the name of the K8s Secret that holds VIP test user credentials. +const vipTestCredentialsSecret = "vip-test-credentials" + +// EnsureTestUser ensures a test user exists in Keycloak and credentials are in a Secret. 
+// adminSecretName is the K8s secret holding the Keycloak admin credentials. +// +// Note: there is a theoretical TOCTOU race if two ptd verify invocations run concurrently +// against the same namespace. Both could pass the "secret missing" check, each generate a +// different password, call createKeycloakUser (which resets the password), and then both +// kubectl-apply the secret—leaving the secret with one password and Keycloak with the other. +// This is accepted as benign: ptd verify is designed for single-operator use, and +// createKeycloakUser resets the password idempotently so a retry will always reconcile. +func EnsureTestUser(ctx context.Context, env []string, keycloakURL string, realm string, testUsername string, adminSecretName string, namespace string) error { + // Check if the vip-test-credentials secret already exists + checkCmd := exec.CommandContext(ctx, "kubectl", "get", "secret", vipTestCredentialsSecret, + "-n", namespace, "--ignore-not-found", "-o", "jsonpath={.metadata.name}") + checkCmd.Env = env + + output, err := checkCmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return fmt.Errorf("failed to check for existing credentials secret: %s", string(exitErr.Stderr)) + } + return fmt.Errorf("failed to check for existing credentials secret: %w", err) + } + if strings.TrimSpace(string(output)) == vipTestCredentialsSecret { + slog.Info("Test user credentials secret already exists, skipping creation") + return nil + } + + slog.Info("Creating test user in Keycloak") + + adminUser, adminPass, err := getSecretCredentials(ctx, env, adminSecretName, namespace) + if err != nil { + return fmt.Errorf("failed to get Keycloak admin credentials: %w", err) + } + + // Get admin access token + token, err := getKeycloakAdminToken(ctx, keycloakURL, adminUser, adminPass) + if err != nil { + return fmt.Errorf("failed to get admin token: %w", err) + } + + // Create test user with a randomly generated password + username := 
testUsername + password, err := generatePassword(32) + if err != nil { + return fmt.Errorf("failed to generate password: %w", err) + } + + if err := createKeycloakUser(ctx, keycloakURL, realm, token, username, password); err != nil { + return fmt.Errorf("failed to create test user: %w", err) + } + + // Create the vip-test-credentials secret + if err := createCredentialsSecret(ctx, env, username, password, namespace); err != nil { + return fmt.Errorf("failed to create credentials secret: %w", err) + } + + slog.Info("Test user created successfully", "username", username) + return nil +} + +// generatePassword generates a cryptographically random password of the given length. +func generatePassword(length int) (string, error) { + const chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%" + result := make([]byte, length) + for i := range result { + n, err := rand.Int(rand.Reader, big.NewInt(int64(len(chars)))) + if err != nil { + return "", err + } + result[i] = chars[n.Int64()] + } + return string(result), nil +} + +// getSecretCredentials retrieves username and password from a Kubernetes Secret. +func getSecretCredentials(ctx context.Context, env []string, secretName string, namespace string) (string, string, error) { + cmd := exec.CommandContext(ctx, "kubectl", "get", "secret", secretName, + "-n", namespace, + "-o", "jsonpath={.data.username} {.data.password}") + cmd.Env = env + + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return "", "", fmt.Errorf("kubectl get secret failed: %s", string(exitErr.Stderr)) + } + return "", "", fmt.Errorf("kubectl get secret failed: %w", err) + } + + return parseSecretData(string(output)) +} + +// parseSecretData parses kubectl jsonpath output of the form " " +// and returns the decoded username and password. 
+func parseSecretData(output string) (string, string, error) { + parts := strings.Fields(strings.TrimSpace(output)) + if len(parts) != 2 { + return "", "", fmt.Errorf("unexpected secret format") + } + + // Decode base64 values using stdlib (portable, no subprocess) + usernameBytes, err := base64.StdEncoding.DecodeString(parts[0]) + if err != nil { + return "", "", fmt.Errorf("failed to decode username: %w", err) + } + + passwordBytes, err := base64.StdEncoding.DecodeString(parts[1]) + if err != nil { + return "", "", fmt.Errorf("failed to decode password: %w", err) + } + + return string(usernameBytes), string(passwordBytes), nil +} + +// getKeycloakAdminToken gets an admin access token from Keycloak's master realm. +// Admin tokens are always obtained from the master realm, regardless of the target realm. +func getKeycloakAdminToken(ctx context.Context, keycloakURL, username, password string) (string, error) { + tokenURL := fmt.Sprintf("%s/realms/master/protocol/openid-connect/token", keycloakURL) + + data := url.Values{ + "grant_type": {"password"}, + "client_id": {"admin-cli"}, + "username": {username}, + "password": {password}, + } + req, err := http.NewRequestWithContext(ctx, "POST", tokenURL, strings.NewReader(data.Encode())) + if err != nil { + return "", err + } + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + client := &http.Client{Timeout: keycloakHTTPTimeout} + resp, err := client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return "", fmt.Errorf("token request failed with status %d: %s", resp.StatusCode, string(body)) + } + + var result struct { + AccessToken string `json:"access_token"` + } + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return "", err + } + + if result.AccessToken == "" { + return "", fmt.Errorf("access_token missing from token response") + } + + return result.AccessToken, nil +} + 
+// createKeycloakUser creates a user in Keycloak, or resets their password if they already exist. +// Resetting the password when the user exists ensures the K8s secret (written after this call) +// always matches the actual Keycloak credentials. +func createKeycloakUser(ctx context.Context, keycloakURL, realm, token, username, password string) error { + client := &http.Client{Timeout: keycloakHTTPTimeout} + usersURL := fmt.Sprintf("%s/admin/realms/%s/users", keycloakURL, url.PathEscape(realm)) + + // Check if user already exists; use url.Values to safely encode the username. + params := url.Values{"username": {username}, "exact": {"true"}} + searchURL := usersURL + "?" + params.Encode() + req, err := http.NewRequestWithContext(ctx, "GET", searchURL, nil) + if err != nil { + return err + } + req.Header.Set("Authorization", "Bearer "+token) + + searchResp, err := client.Do(req) + if err != nil { + return err + } + + if searchResp.StatusCode == http.StatusOK { + body, _ := io.ReadAll(searchResp.Body) + searchResp.Body.Close() + var users []map[string]interface{} + if err := json.Unmarshal(body, &users); err == nil && len(users) > 0 { + slog.Info("User already exists in Keycloak, resetting password", "username", username) + userID, ok := users[0]["id"].(string) + if !ok || userID == "" { + return fmt.Errorf("could not extract user ID from Keycloak search response") + } + return resetKeycloakUserPassword(ctx, keycloakURL, realm, token, userID, password, client) + } + } else { + slog.Warn("user search failed, attempting create", "status", searchResp.StatusCode) + io.Copy(io.Discard, searchResp.Body) + searchResp.Body.Close() + } + + // Create user with password + userPayload := map[string]interface{}{ + "username": username, + "enabled": true, + "emailVerified": true, + "credentials": []map[string]interface{}{ + { + "type": "password", + "value": password, + "temporary": false, + }, + }, + } + + payloadBytes, err := json.Marshal(userPayload) + if err != nil { + return 
err + } + + req, err = http.NewRequestWithContext(ctx, "POST", usersURL, bytes.NewReader(payloadBytes)) + if err != nil { + return err + } + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("Content-Type", "application/json") + + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusCreated { + body, _ := io.ReadAll(resp.Body) + if resp.StatusCode == http.StatusConflict { + // User exists (possibly created concurrently, or the initial search lacked permissions). + // Re-search to obtain the user ID and reset the password. + slog.Info("User already exists (409 on create), re-searching to reset password", "username", username) + req2, err := http.NewRequestWithContext(ctx, "GET", searchURL, nil) + if err != nil { + return err + } + req2.Header.Set("Authorization", "Bearer "+token) + searchResp2, err := client.Do(req2) + if err != nil { + return err + } + body2, _ := io.ReadAll(searchResp2.Body) + searchResp2.Body.Close() + if searchResp2.StatusCode != http.StatusOK { + return fmt.Errorf("re-search after 409 failed with status %d: %s", searchResp2.StatusCode, string(body2)) + } + var users2 []map[string]interface{} + if err := json.Unmarshal(body2, &users2); err != nil || len(users2) == 0 { + return fmt.Errorf("could not find user after 409 conflict: %s", string(body)) + } + userID, ok := users2[0]["id"].(string) + if !ok || userID == "" { + return fmt.Errorf("could not extract user ID after 409 conflict") + } + return resetKeycloakUserPassword(ctx, keycloakURL, realm, token, userID, password, client) + } + return fmt.Errorf("create user failed with status %d: %s", resp.StatusCode, string(body)) + } + + return nil +} + +// resetKeycloakUserPassword sets a user's password via the Keycloak admin API. 
func resetKeycloakUserPassword(ctx context.Context, keycloakURL, realm, token, userID, password string, client *http.Client) error {
	// Keycloak expects a CredentialRepresentation; temporary=false makes the
	// password permanent (no forced change on first login).
	cred := map[string]interface{}{
		"type":      "password",
		"value":     password,
		"temporary": false,
	}
	body, err := json.Marshal(cred)
	if err != nil {
		return err
	}

	endpoint := fmt.Sprintf("%s/admin/realms/%s/users/%s/reset-password", keycloakURL, url.PathEscape(realm), url.PathEscape(userID))
	req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("Content-Type", "application/json")

	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// Keycloak answers 204 No Content on success.
	if resp.StatusCode != http.StatusNoContent {
		respBody, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("reset password failed with status %d: %s", resp.StatusCode, string(respBody))
	}

	return nil
}

// buildSecretSpec assembles an Opaque v1 Secret manifest (as a map ready for
// JSON marshalling) holding the base64-encoded username and password, labelled
// so the resource is identifiable as ptd-managed.
func buildSecretSpec(name, namespace, username, password string) map[string]interface{} {
	enc := base64.StdEncoding.EncodeToString

	metadata := map[string]interface{}{
		"name":      name,
		"namespace": namespace,
		"labels": map[string]string{
			"app.kubernetes.io/name":       "vip-verify",
			"app.kubernetes.io/managed-by": "ptd",
		},
	}

	return map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Secret",
		"metadata":   metadata,
		"type":       "Opaque",
		"data": map[string]string{
			"username": enc([]byte(username)),
			"password": enc([]byte(password)),
		},
	}
}

// createCredentialsSecret creates a K8s secret with test user credentials.
// Uses JSON marshalling to prevent injection, consistent with job.go.
+func createCredentialsSecret(ctx context.Context, env []string, username, password string, namespace string) error { + secret := buildSecretSpec(vipTestCredentialsSecret, namespace, username, password) + + secretJSON, err := json.Marshal(secret) + if err != nil { + return fmt.Errorf("failed to marshal secret: %w", err) + } + + cmd := exec.CommandContext(ctx, "kubectl", "apply", "-f", "-", "-n", namespace) + cmd.Env = env + cmd.Stdin = strings.NewReader(string(secretJSON)) + + if output, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("kubectl apply secret failed: %s", string(output)) + } + + return nil +} diff --git a/cmd/internal/verify/keycloak_test.go b/cmd/internal/verify/keycloak_test.go new file mode 100644 index 0000000..0bcbc63 --- /dev/null +++ b/cmd/internal/verify/keycloak_test.go @@ -0,0 +1,169 @@ +package verify + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" +) + +func TestGetKeycloakAdminToken_NonOKStatus(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte(`{"error":"invalid_grant"}`)) + })) + defer srv.Close() + + _, err := getKeycloakAdminToken(context.Background(), srv.URL, "admin", "wrongpass") + if err == nil { + t.Fatal("expected error for 401 response, got nil") + } +} + +func TestGetKeycloakAdminToken_InvalidJSON(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`not-json`)) + })) + defer srv.Close() + + _, err := getKeycloakAdminToken(context.Background(), srv.URL, "admin", "pass") + if err == nil { + t.Fatal("expected error for invalid JSON response, got nil") + } +} + +func TestGetKeycloakAdminToken_EmptyAccessToken(t *testing.T) { + // A 200 response with valid JSON but no access_token field should return an error. 
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"token_type":"Bearer"}`)) + })) + defer srv.Close() + + _, err := getKeycloakAdminToken(context.Background(), srv.URL, "admin", "pass") + if err == nil { + t.Fatal("expected error when access_token is absent from response, got nil") + } +} + +func TestCreateKeycloakUser_CreateFails(t *testing.T) { + // Search returns empty list, create returns 409, re-search also returns empty list → error. + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + w.WriteHeader(http.StatusOK) + w.Write([]byte(`[]`)) + case http.MethodPost: + w.WriteHeader(http.StatusConflict) + w.Write([]byte(`{"errorMessage":"User exists with same username"}`)) + } + })) + defer srv.Close() + + err := createKeycloakUser(context.Background(), srv.URL, "myrealm", "token", "user", "pass") + if err == nil { + t.Fatal("expected error when re-search after 409 finds no user, got nil") + } +} + +func TestCreateKeycloakUser_409ResearchResetsPassword(t *testing.T) { + // Search fails with 403, create returns 409, re-search finds the user → reset password. 
+ userID := "found-user-id" + getCount := 0 + resetCalled := false + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + getCount++ + if getCount == 1 { + // First search: permission denied + w.WriteHeader(http.StatusForbidden) + w.Write([]byte(`{"error":"access_denied"}`)) + } else { + // Re-search after 409: user found + users := []map[string]interface{}{{"id": userID, "username": "user"}} + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(users) + } + case http.MethodPost: + w.WriteHeader(http.StatusConflict) + w.Write([]byte(`{"errorMessage":"User exists with same username"}`)) + case http.MethodPut: + resetCalled = true + w.WriteHeader(http.StatusNoContent) + } + })) + defer srv.Close() + + err := createKeycloakUser(context.Background(), srv.URL, "myrealm", "token", "user", "pass") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !resetCalled { + t.Error("expected password reset to be called after 409 re-search") + } +} + +func TestCreateKeycloakUser_SearchReturnsError(t *testing.T) { + // Search returns 403 Forbidden (insufficient permissions) + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusForbidden) + w.Write([]byte(`{"error":"access_denied"}`)) + })) + defer srv.Close() + + // Non-200 search falls through to create attempt; create also gets 403. + // Either way an error should be returned. 
+ err := createKeycloakUser(context.Background(), srv.URL, "myrealm", "token", "user", "pass") + if err == nil { + t.Fatal("expected error when create returns 403, got nil") + } +} + +func TestResetKeycloakUserPassword_NonNoContentStatus(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`{"errorMessage":"Invalid password policy"}`)) + })) + defer srv.Close() + + client := &http.Client{} + err := resetKeycloakUserPassword(context.Background(), srv.URL, "myrealm", "token", "user-id", "newpass", client) + if err == nil { + t.Fatal("expected error for 400 reset response, got nil") + } +} + +func TestCreateKeycloakUser_ExistingUserResetsPassword(t *testing.T) { + userID := "abc-123" + searchCalled := false + resetCalled := false + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + searchCalled = true + w.WriteHeader(http.StatusOK) + users := []map[string]interface{}{{"id": userID, "username": "existing-user"}} + json.NewEncoder(w).Encode(users) + case http.MethodPut: + resetCalled = true + w.WriteHeader(http.StatusNoContent) + } + })) + defer srv.Close() + + err := createKeycloakUser(context.Background(), srv.URL, "myrealm", "token", "existing-user", "newpass") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !searchCalled { + t.Error("expected search to be called") + } + if !resetCalled { + t.Error("expected password reset to be called for existing user") + } +} diff --git a/cmd/internal/verify/verify.go b/cmd/internal/verify/verify.go new file mode 100644 index 0000000..02e5765 --- /dev/null +++ b/cmd/internal/verify/verify.go @@ -0,0 +1,392 @@ +package verify + +import ( + "context" + "crypto/rand" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "log/slog" + "os" + "os/exec" + "strings" + "time" + + "gopkg.in/yaml.v3" +) + 
+// Options contains configuration for the verify command +type Options struct { + Target string + SiteName string + Namespace string // Kubernetes namespace (default: posit-team) + Categories string + LocalMode bool + ConfigOnly bool + Image string + KeycloakURL string // overrides the default https://key. if set + Realm string // Keycloak realm (default: posit) + TestUsername string // Keycloak test user name (default: vip-test-user) + KeycloakAdminSecret string // overrides the default {siteName}-keycloak-initial-admin + InteractiveAuth bool // mint credentials via interactive browser login + Timeout time.Duration // WaitForJob timeout (default: 15 minutes) + Env []string +} + +// Run executes the VIP verification process +func Run(ctx context.Context, opts Options) error { + slog.Info("Starting VIP verification", "target", opts.Target, "site", opts.SiteName) + + // Get Site CR from Kubernetes + slog.Info("Fetching Site CR from Kubernetes") + siteYAML, err := getSiteCR(ctx, opts.Env, opts.SiteName, opts.Namespace) + if err != nil { + return fmt.Errorf("failed to get Site CR: %w", err) + } + + // Parse Site CR once + var site SiteCR + if err := yaml.Unmarshal(siteYAML, &site); err != nil { + return fmt.Errorf("failed to parse Site CR: %w", err) + } + + // Generate VIP config + slog.Info("Generating VIP configuration") + vipConfig, err := GenerateConfig(&site, opts.Target) + if err != nil { + return fmt.Errorf("failed to generate VIP config: %w", err) + } + + // If config-only mode, just print and exit + if opts.ConfigOnly { + fmt.Println(vipConfig) + return nil + } + + // Handle credentials based on authentication mode + var credentialsAvailable bool + if opts.InteractiveAuth { + // Interactive auth mode: mint credentials via VIP CLI and kubectl exec + slog.Info("Using interactive authentication mode") + + // Get Connect URL from site + connectURL := "" + if site.Spec.Connect != nil { + connectURL = buildProductURL(site.Spec.Connect, "connect", site.Spec.Domain) 
+ } + if connectURL == "" { + return fmt.Errorf("Connect is not configured for this site; interactive auth requires Connect") + } + + // Mint Connect API key via VIP CLI + slog.Info("Minting Connect API key via VIP CLI") + apiKey, keyName, err := MintConnectKey(ctx, connectURL) + if err != nil { + return fmt.Errorf("failed to mint Connect API key: %w", err) + } + + // Generate Workbench token + slog.Info("Generating Workbench API token") + workbenchToken, err := GenerateWorkbenchToken(ctx, opts.Env, opts.Namespace, opts.SiteName, opts.TestUsername) + if err != nil { + return fmt.Errorf("failed to generate Workbench token: %w", err) + } + + // Generate Package Manager token + slog.Info("Generating Package Manager token") + pmToken, err := GeneratePackageManagerToken(ctx, opts.Env, opts.Namespace, opts.SiteName) + if err != nil { + return fmt.Errorf("failed to generate Package Manager token: %w", err) + } + + // Save all credentials to K8s Secret + creds := map[string]string{ + "VIP_CONNECT_API_KEY": apiKey, + "VIP_CONNECT_KEY_NAME": keyName, + "VIP_WORKBENCH_API_KEY": workbenchToken, + "VIP_PM_TOKEN": pmToken, + } + slog.Info("Saving credentials to Kubernetes Secret") + if err := SaveCredentialsSecret(ctx, opts.Env, opts.Namespace, creds); err != nil { + return fmt.Errorf("failed to save credentials secret: %w", err) + } + + credentialsAvailable = true + } else { + // Keycloak mode: ensure test user exists + credentialsAvailable = site.Spec.Keycloak != nil && site.Spec.Keycloak.Enabled + keycloakURL, err := deriveKeycloakURL(opts.KeycloakURL, site.Spec.Domain, credentialsAvailable) + if err != nil { + return err + } + if credentialsAvailable { + adminSecretName := opts.KeycloakAdminSecret + if adminSecretName == "" { + adminSecretName = fmt.Sprintf("%s-keycloak-initial-admin", opts.SiteName) + } + slog.Info("Ensuring test user exists in Keycloak") + if err := EnsureTestUser(ctx, opts.Env, keycloakURL, opts.Realm, opts.TestUsername, adminSecretName, opts.Namespace); 
err != nil { + return fmt.Errorf("failed to ensure test user: %w", err) + } + } else { + slog.Info("Keycloak not configured for this site, skipping test user creation") + } + } + + // Run tests based on mode + if opts.LocalMode { + return runLocalTests(ctx, opts, vipConfig, credentialsAvailable) + } + + return runKubernetesTests(ctx, opts, vipConfig, credentialsAvailable) +} + +// getSiteCR retrieves the Site CR YAML from Kubernetes +func getSiteCR(ctx context.Context, env []string, siteName, namespace string) ([]byte, error) { + cmd := exec.CommandContext(ctx, "kubectl", "get", "site", siteName, + "-n", namespace, + "-o", "yaml") + cmd.Env = env + + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return nil, fmt.Errorf("kubectl get site failed: %s", string(exitErr.Stderr)) + } + return nil, fmt.Errorf("kubectl get site failed: %w", err) + } + + return output, nil +} + +// deriveKeycloakURL returns the Keycloak URL to use. If override is non-empty it is returned +// as-is. Otherwise the URL is derived from domain. An error is returned only when Keycloak is +// enabled (needsURL) and domain is empty, which would produce an invalid URL. 
+func deriveKeycloakURL(override, domain string, needsURL bool) (string, error) { + if override != "" { + return override, nil + } + if needsURL && domain == "" { + return "", fmt.Errorf("site domain is required to derive Keycloak URL; use --keycloak-url to override") + } + if domain == "" { + return "", nil + } + return fmt.Sprintf("https://key.%s", domain), nil +} + +// runLocalTests runs VIP tests locally using uv +func runLocalTests(ctx context.Context, opts Options, vipConfig string, credentialsAvailable bool) error { + slog.Info("Running VIP tests locally") + + // Create temporary config file + tmpfile, err := os.CreateTemp("", "vip-config-*.toml") + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + defer os.Remove(tmpfile.Name()) + + if _, err := tmpfile.WriteString(vipConfig); err != nil { + tmpfile.Close() + return fmt.Errorf("failed to write config: %w", err) + } + tmpfile.Close() + + // Run pytest with uv + args := []string{"run", "pytest", "--config", tmpfile.Name(), "--tb=short", "-v"} + if opts.Categories != "" { + args = append(args, "-m", opts.Categories) + } + + cmd := exec.CommandContext(ctx, "uv", args...) 
+ if credentialsAvailable { + if opts.InteractiveAuth { + // Interactive auth mode: fetch API tokens from Secret + localEnv, err := buildLocalEnvWithTokens(ctx, opts.Env, opts.Namespace) + if err != nil { + return err + } + cmd.Env = localEnv + } else { + // Keycloak mode: fetch username/password from Secret + testUser, testPass, err := getSecretCredentials(ctx, opts.Env, vipTestCredentialsSecret, opts.Namespace) + if err != nil { + return fmt.Errorf("failed to get test credentials: %w", err) + } + localEnv, err := buildLocalEnv(opts.Env, testUser, testPass) + if err != nil { + return err + } + cmd.Env = localEnv + } + } else { + cmd.Env = opts.Env + } + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("VIP tests failed: %w", err) + } + + fmt.Println("\nVIP tests completed successfully") + return nil +} + +// buildLocalEnv constructs the environment for a local uv invocation. +// It strips any pre-existing VIP_TEST_USERNAME/VIP_TEST_PASSWORD entries from env +// (preventing duplicates when the caller's environment already exports them), +// then appends the provided credentials. Returns an error if credentials contain +// newline characters. +func buildLocalEnv(env []string, testUser, testPass string) ([]string, error) { + if strings.ContainsAny(testUser, "\n\r\x00") || strings.ContainsAny(testPass, "\n\r\x00") { + return nil, fmt.Errorf("test credentials must not contain newline or null characters") + } + result := make([]string, 0, len(env)+2) + for _, e := range env { + if !strings.HasPrefix(e, "VIP_TEST_USERNAME=") && !strings.HasPrefix(e, "VIP_TEST_PASSWORD=") { + result = append(result, e) + } + } + return append(result, "VIP_TEST_USERNAME="+testUser, "VIP_TEST_PASSWORD="+testPass), nil +} + +// buildLocalEnvWithTokens fetches API tokens from the K8s Secret and constructs +// the environment for local VIP runs with interactive auth. 
+func buildLocalEnvWithTokens(ctx context.Context, env []string, namespace string) ([]string, error) { + // Get the Secret + cmd := exec.CommandContext(ctx, "kubectl", "get", "secret", vipTestCredentialsSecret, + "-n", namespace, + "-o", "json") + cmd.Env = env + + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return nil, fmt.Errorf("kubectl get secret failed: %s", string(exitErr.Stderr)) + } + return nil, fmt.Errorf("kubectl get secret failed: %w", err) + } + + var secret struct { + Data map[string]string `json:"data"` + } + if err := json.Unmarshal(output, &secret); err != nil { + return nil, fmt.Errorf("failed to parse secret: %w", err) + } + + // Decode and extract tokens + tokenKeys := []string{"VIP_CONNECT_API_KEY", "VIP_WORKBENCH_API_KEY", "VIP_PM_TOKEN"} + tokens := make(map[string]string) + for _, key := range tokenKeys { + if b64Value, ok := secret.Data[key]; ok { + decoded, err := base64.StdEncoding.DecodeString(b64Value) + if err != nil { + return nil, fmt.Errorf("failed to decode %s: %w", key, err) + } + tokens[key] = string(decoded) + } + } + + // Strip any pre-existing token env vars from env + result := make([]string, 0, len(env)+len(tokens)) + for _, e := range env { + skip := false + for key := range tokens { + if strings.HasPrefix(e, key+"=") { + skip = true + break + } + } + if !skip { + result = append(result, e) + } + } + + // Append token env vars + for key, value := range tokens { + result = append(result, key+"="+value) + } + + return result, nil +} + +// randomHex returns n random hex-encoded bytes (2n hex characters). 
+func randomHex(n int) (string, error) { + b := make([]byte, n) + if _, err := rand.Read(b); err != nil { + return "", err + } + return hex.EncodeToString(b), nil +} + +// runKubernetesTests runs VIP tests as a Kubernetes Job +func runKubernetesTests(ctx context.Context, opts Options, vipConfig string, credentialsAvailable bool) error { + suffix, err := randomHex(3) // 6 hex chars + if err != nil { + return fmt.Errorf("failed to generate name suffix: %w", err) + } + timestamp := time.Now().Format("20060102150405") + jobName := fmt.Sprintf("vip-verify-%s-%s", timestamp, suffix) + configName := fmt.Sprintf("vip-verify-config-%s-%s", timestamp, suffix) + + slog.Info("Creating ConfigMap", "name", configName) + if err := CreateConfigMap(ctx, opts.Env, configName, vipConfig, opts.Namespace); err != nil { + return fmt.Errorf("failed to create ConfigMap: %w", err) + } + + // Clean up resources on exit using a fresh context so cleanup succeeds + // even if the caller context has expired after the job wait. 
+ defer func() { + slog.Debug("Cleaning up resources") + cleanupCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + if err := Cleanup(cleanupCtx, opts.Env, jobName, configName, opts.Namespace); err != nil { + slog.Warn("Failed to cleanup resources", "error", err) + } + }() + + slog.Info("Creating VIP verification Job", "name", jobName) + jobOpts := JobOptions{ + Image: opts.Image, + Categories: opts.Categories, + JobName: jobName, + ConfigName: configName, + Namespace: opts.Namespace, + CredentialsAvailable: credentialsAvailable, + InteractiveAuth: opts.InteractiveAuth, + Timeout: opts.Timeout, + } + + if err := CreateJob(ctx, opts.Env, jobOpts); err != nil { + return fmt.Errorf("failed to create Job: %w", err) + } + + slog.Info("Streaming Job logs") + if err := StreamLogs(ctx, opts.Env, jobName, opts.Namespace, opts.Timeout); err != nil { + if errors.Is(err, errImagePull) { + return err + } + slog.Warn("Failed to stream logs", "error", err) + } + + slog.Info("Waiting for Job to complete") + timeout := opts.Timeout + if timeout == 0 { + timeout = 15 * time.Minute + } + success, err := WaitForJob(ctx, opts.Env, jobName, opts.Namespace, timeout) + if err != nil { + return fmt.Errorf("failed to wait for Job: %w", err) + } + + if !success { + fmt.Println("\nVIP verification failed") + return fmt.Errorf("VIP tests failed") + } + + fmt.Println("\nVIP verification completed successfully") + return nil +} diff --git a/cmd/internal/verify/verify_test.go b/cmd/internal/verify/verify_test.go new file mode 100644 index 0000000..83e69dd --- /dev/null +++ b/cmd/internal/verify/verify_test.go @@ -0,0 +1,667 @@ +package verify + +import ( + "context" + "encoding/base64" + "encoding/json" + "strings" + "testing" + "time" +) + +func TestParseSecretData_InvalidFormat(t *testing.T) { + cases := []struct { + name string + output string + }{ + {"empty", ""}, + {"whitespace only", " "}, + {"single field", "dGVzdA=="}, + {"three fields", "dGVzdA== 
dGVzdA== dGVzdA=="}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + _, _, err := parseSecretData(tc.output) + if err == nil { + t.Fatalf("expected error for input %q, got nil", tc.output) + } + }) + } +} + +func TestParseSecretData_InvalidBase64(t *testing.T) { + _, _, err := parseSecretData("not-valid-base64!!! dGVzdA==") + if err == nil { + t.Fatal("expected error for invalid base64 in username field, got nil") + } + + _, _, err = parseSecretData("dGVzdA== not-valid-base64!!!") + if err == nil { + t.Fatal("expected error for invalid base64 in password field, got nil") + } +} + +func TestParseSecretData_Valid(t *testing.T) { + user := base64.StdEncoding.EncodeToString([]byte("admin")) + pass := base64.StdEncoding.EncodeToString([]byte("s3cr3t")) + + gotUser, gotPass, err := parseSecretData(user + " " + pass) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if gotUser != "admin" { + t.Errorf("got username %q, want %q", gotUser, "admin") + } + if gotPass != "s3cr3t" { + t.Errorf("got password %q, want %q", gotPass, "s3cr3t") + } +} + +func TestWaitForJob_ContextCancellation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() // cancel immediately so ctx.Done() fires on first select + + _, err := WaitForJob(ctx, nil, "test-job", "test-namespace", time.Minute) + if err == nil { + t.Fatal("expected error for cancelled context, got nil") + } +} + +func TestGenerateConfig_NilSite(t *testing.T) { + _, err := GenerateConfig(nil, "test") + if err == nil { + t.Fatal("expected error for nil site, got nil") + } +} + +func TestGenerateConfig_ConnectOnly(t *testing.T) { + site := &SiteCR{ + Spec: SiteSpec{ + Domain: "example.com", + Connect: &ProductSpec{ + Auth: &AuthSpec{Type: "saml"}, + }, + }, + } + + config, err := GenerateConfig(site, "my-deployment") + if err != nil { + t.Fatalf("GenerateConfig returned error: %v", err) + } + + if config == "" { + t.Fatal("expected non-empty config") + } + + // Auth 
provider should come from Connect when present + if !strings.Contains(config, `provider = "saml"`) { + t.Errorf("expected saml auth provider, got:\n%s", config) + } + + // Connect should be enabled with the correct URL + if !strings.Contains(config, `url = "https://connect.example.com"`) { + t.Errorf("expected connect URL, got:\n%s", config) + } + + // Workbench section should be present (disabled) + if !strings.Contains(config, `[workbench]`) { + t.Errorf("expected workbench section, got:\n%s", config) + } +} + +func TestGenerateConfig_AuthProviderPrecedence(t *testing.T) { + // Connect auth takes precedence over Workbench auth + site := &SiteCR{ + Spec: SiteSpec{ + Domain: "example.com", + Connect: &ProductSpec{ + Auth: &AuthSpec{Type: "saml"}, + }, + Workbench: &ProductSpec{ + Auth: &AuthSpec{Type: "ldap"}, + }, + }, + } + + config, err := GenerateConfig(site, "test") + if err != nil { + t.Fatalf("GenerateConfig returned error: %v", err) + } + + if !strings.Contains(config, `provider = "saml"`) { + t.Errorf("expected Connect auth (saml) to win, got:\n%s", config) + } +} + +func TestGenerateConfig_WorkbenchAuthFallback(t *testing.T) { + // When Connect has no auth spec, fall back to Workbench auth + site := &SiteCR{ + Spec: SiteSpec{ + Domain: "example.com", + Connect: &ProductSpec{}, + Workbench: &ProductSpec{ + Auth: &AuthSpec{Type: "ldap"}, + }, + }, + } + + config, err := GenerateConfig(site, "test") + if err != nil { + t.Fatalf("GenerateConfig returned error: %v", err) + } + + if !strings.Contains(config, `provider = "ldap"`) { + t.Errorf("expected Workbench auth (ldap) as fallback, got:\n%s", config) + } +} + +func TestGenerateConfig_DefaultAuth(t *testing.T) { + // When no product has an auth spec, default to oidc + site := &SiteCR{ + Spec: SiteSpec{ + Domain: "example.com", + Connect: &ProductSpec{}, + }, + } + + config, err := GenerateConfig(site, "test") + if err != nil { + t.Fatalf("GenerateConfig returned error: %v", err) + } + + if 
!strings.Contains(config, `provider = "oidc"`) { + t.Errorf("expected default oidc auth, got:\n%s", config) + } +} + +func TestGenerateConfig_CustomDomainPrefix(t *testing.T) { + site := &SiteCR{ + Spec: SiteSpec{ + Domain: "example.com", + Connect: &ProductSpec{ + DomainPrefix: "rsconnect", + }, + }, + } + + config, err := GenerateConfig(site, "test") + if err != nil { + t.Fatalf("GenerateConfig returned error: %v", err) + } + + if !strings.Contains(config, `url = "https://rsconnect.example.com"`) { + t.Errorf("expected custom domain prefix in URL, got:\n%s", config) + } +} + +func TestGenerateConfig_EmptyAuthType(t *testing.T) { + // Auth.Type == "" should fall through to the default "oidc", not produce provider = "" + site := &SiteCR{ + Spec: SiteSpec{ + Domain: "example.com", + Connect: &ProductSpec{ + Auth: &AuthSpec{Type: ""}, + }, + }, + } + + config, err := GenerateConfig(site, "test") + if err != nil { + t.Fatalf("GenerateConfig returned error: %v", err) + } + + if !strings.Contains(config, `provider = "oidc"`) { + t.Errorf("expected oidc default when Auth.Type is empty, got:\n%s", config) + } +} + +func TestGenerateConfig_EmptyDomain(t *testing.T) { + // Empty domain with a product that has no per-product baseDomain should return an error + site := &SiteCR{ + Spec: SiteSpec{ + Domain: "", + Connect: &ProductSpec{}, + }, + } + + _, err := GenerateConfig(site, "test") + if err == nil { + t.Fatal("expected error for empty domain with configured product and no baseDomain, got nil") + } +} + +func TestGenerateConfig_EmptyDomainAllBaseDomains(t *testing.T) { + // Empty site-level domain is valid when every product has its own baseDomain. + // BaseDomain must be a bare parent domain (e.g. "custom.org"); buildProductURL + // prepends the product prefix to produce "https://connect.custom.org". 
+ site := &SiteCR{ + Spec: SiteSpec{ + Domain: "", + Connect: &ProductSpec{ + BaseDomain: "custom.org", + }, + }, + } + + config, err := GenerateConfig(site, "test") + if err != nil { + t.Fatalf("expected no error when all products have baseDomain, got: %v", err) + } + if !strings.Contains(config, `url = "https://connect.custom.org"`) { + t.Errorf("expected connect URL using baseDomain, got:\n%s", config) + } +} + +func TestGenerateConfig_BaseDomainWithSubdomainProducesDoublePrefix(t *testing.T) { + // If BaseDomain is mistakenly set to a fully-qualified hostname like + // "connect.custom.org" instead of the bare parent "custom.org", buildProductURL + // prepends the product prefix again, producing a double-prefix URL. + // This test documents that footgun so the behaviour is explicit and visible. + site := &SiteCR{ + Spec: SiteSpec{ + Domain: "", + Connect: &ProductSpec{ + BaseDomain: "connect.custom.org", + }, + }, + } + + config, err := GenerateConfig(site, "test") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !strings.Contains(config, `url = "https://connect.connect.custom.org"`) { + t.Errorf("expected double-prefix URL from subdomain BaseDomain, got:\n%s", config) + } +} + +func TestDeriveKeycloakURL(t *testing.T) { + // Override takes precedence over domain. + got, err := deriveKeycloakURL("https://custom.example.com", "", true) + if err != nil || got != "https://custom.example.com" { + t.Fatalf("expected override URL, got %q, err %v", got, err) + } + + // Empty domain with Keycloak enabled and no override returns an error. + _, err = deriveKeycloakURL("", "", true) + if err == nil { + t.Fatal("expected error for empty domain when Keycloak is enabled, got nil") + } + + // Empty domain with Keycloak disabled is not an error (URL won't be used). + // Returns "" rather than a malformed "https://key." URL. 
+ got, err = deriveKeycloakURL("", "", false) + if err != nil { + t.Fatalf("unexpected error when Keycloak disabled: %v", err) + } + if got != "" { + t.Errorf("unexpected URL %q when Keycloak disabled", got) + } + + // Domain is used when no override is set and Keycloak is enabled. + got, err = deriveKeycloakURL("", "example.com", true) + if err != nil || got != "https://key.example.com" { + t.Fatalf("expected derived URL, got %q, err %v", got, err) + } +} + +func TestBuildProductURL_BaseDomainOverride(t *testing.T) { + spec := &ProductSpec{ + BaseDomain: "custom.org", + } + got := buildProductURL(spec, "connect", "example.com") + want := "https://connect.custom.org" + if got != want { + t.Errorf("buildProductURL with BaseDomain = %q, want %q", got, want) + } +} + +func TestBuildProductURL_DomainPrefixAndBaseDomain(t *testing.T) { + spec := &ProductSpec{ + DomainPrefix: "rsc", + BaseDomain: "custom.org", + } + got := buildProductURL(spec, "connect", "example.com") + want := "https://rsc.custom.org" + if got != want { + t.Errorf("buildProductURL with DomainPrefix+BaseDomain = %q, want %q", got, want) + } +} + +func TestBuildJobSpec(t *testing.T) { + tests := []struct { + name string + opts JobOptions + wantAPIVersion string + wantKind string + wantContainerEnv bool + wantCategories bool + }{ + { + name: "basic spec without credentials", + opts: JobOptions{ + Image: "vip:latest", + JobName: "vip-test-123", + ConfigName: "vip-config-123", + Namespace: "default", + CredentialsAvailable: false, + }, + wantAPIVersion: "batch/v1", + wantKind: "Job", + wantContainerEnv: false, + }, + { + name: "spec with credentials injects env vars", + opts: JobOptions{ + Image: "vip:latest", + JobName: "vip-test-456", + ConfigName: "vip-config-456", + Namespace: "test-ns", + CredentialsAvailable: true, + }, + wantAPIVersion: "batch/v1", + wantKind: "Job", + wantContainerEnv: true, + }, + { + name: "spec with categories adds -m flag", + opts: JobOptions{ + Image: "vip:latest", + JobName: 
"vip-test-789", + ConfigName: "vip-config-789", + Namespace: "default", + Categories: "smoke", + }, + wantAPIVersion: "batch/v1", + wantKind: "Job", + wantCategories: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + spec := buildJobSpec(tt.opts) + + // Verify by round-tripping through JSON (same path as production code). + data, err := json.Marshal(spec) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + var parsed map[string]interface{} + if err := json.Unmarshal(data, &parsed); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + + if parsed["apiVersion"] != tt.wantAPIVersion { + t.Errorf("apiVersion = %v, want %v", parsed["apiVersion"], tt.wantAPIVersion) + } + if parsed["kind"] != tt.wantKind { + t.Errorf("kind = %v, want %v", parsed["kind"], tt.wantKind) + } + + // Verify metadata name and labels. + meta := parsed["metadata"].(map[string]interface{}) + if meta["name"] != tt.opts.JobName { + t.Errorf("metadata.name = %v, want %v", meta["name"], tt.opts.JobName) + } + labels := meta["labels"].(map[string]interface{}) + if labels["app.kubernetes.io/managed-by"] != "ptd" { + t.Errorf("managed-by label = %v, want ptd", labels["app.kubernetes.io/managed-by"]) + } + + // Drill down to the container spec. + jobSpec := parsed["spec"].(map[string]interface{}) + podTemplate := jobSpec["template"].(map[string]interface{}) + podSpec := podTemplate["spec"].(map[string]interface{}) + containers := podSpec["containers"].([]interface{}) + if len(containers) != 1 { + t.Fatalf("expected 1 container, got %d", len(containers)) + } + container := containers[0].(map[string]interface{}) + + if container["image"] != tt.opts.Image { + t.Errorf("container image = %v, want %v", container["image"], tt.opts.Image) + } + + // Check volume mount path. 
+ mounts := container["volumeMounts"].([]interface{}) + if len(mounts) != 1 { + t.Fatalf("expected 1 volumeMount, got %d", len(mounts)) + } + mount := mounts[0].(map[string]interface{}) + if mount["mountPath"] != "/app/vip.toml" { + t.Errorf("mountPath = %v, want /app/vip.toml", mount["mountPath"]) + } + + // Check env vars are present/absent based on CredentialsAvailable. + _, hasEnv := container["env"] + if hasEnv != tt.wantContainerEnv { + t.Errorf("container env present = %v, want %v", hasEnv, tt.wantContainerEnv) + } + + // Check categories flag. + args := container["args"].([]interface{}) + hasM := false + for _, a := range args { + if a == "-m" { + hasM = true + break + } + } + if hasM != tt.wantCategories { + t.Errorf("args contains -m = %v, want %v", hasM, tt.wantCategories) + } + }) + } +} + +func TestBuildSecretSpec(t *testing.T) { + spec := buildSecretSpec("vip-test-credentials", "test-ns", "alice", "s3cr3t") + + data, err := json.Marshal(spec) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + var parsed map[string]interface{} + if err := json.Unmarshal(data, &parsed); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + + if parsed["apiVersion"] != "v1" { + t.Errorf("apiVersion = %v, want v1", parsed["apiVersion"]) + } + if parsed["kind"] != "Secret" { + t.Errorf("kind = %v, want Secret", parsed["kind"]) + } + if parsed["type"] != "Opaque" { + t.Errorf("type = %v, want Opaque", parsed["type"]) + } + + meta := parsed["metadata"].(map[string]interface{}) + if meta["name"] != "vip-test-credentials" { + t.Errorf("metadata.name = %v, want vip-test-credentials", meta["name"]) + } + if meta["namespace"] != "test-ns" { + t.Errorf("metadata.namespace = %v, want test-ns", meta["namespace"]) + } + labels := meta["labels"].(map[string]interface{}) + if labels["app.kubernetes.io/managed-by"] != "ptd" { + t.Errorf("managed-by label = %v, want ptd", labels["app.kubernetes.io/managed-by"]) + } + if labels["app.kubernetes.io/name"] != 
"vip-verify" { + t.Errorf("name label = %v, want vip-verify", labels["app.kubernetes.io/name"]) + } + + secretData := parsed["data"].(map[string]interface{}) + gotUser, _ := base64.StdEncoding.DecodeString(secretData["username"].(string)) + gotPass, _ := base64.StdEncoding.DecodeString(secretData["password"].(string)) + if string(gotUser) != "alice" { + t.Errorf("username = %v, want alice", string(gotUser)) + } + if string(gotPass) != "s3cr3t" { + t.Errorf("password = %v, want s3cr3t", string(gotPass)) + } +} + +func TestBuildLocalEnv(t *testing.T) { + tests := []struct { + name string + env []string + testUser string + testPass string + wantErr bool + }{ + { + name: "newline in username returns error", + env: []string{"PATH=/usr/bin"}, + testUser: "user\ninjected", + testPass: "pass", + wantErr: true, + }, + { + name: "carriage return in username returns error", + env: []string{"PATH=/usr/bin"}, + testUser: "user\rinjected", + testPass: "pass", + wantErr: true, + }, + { + name: "newline in password returns error", + env: []string{"PATH=/usr/bin"}, + testUser: "user", + testPass: "pass\ninjected", + wantErr: true, + }, + { + name: "null byte in username returns error", + env: []string{"PATH=/usr/bin"}, + testUser: "user\x00injected", + testPass: "pass", + wantErr: true, + }, + { + name: "null byte in password returns error", + env: []string{"PATH=/usr/bin"}, + testUser: "user", + testPass: "pass\x00injected", + wantErr: true, + }, + { + name: "existing cred keys are stripped before appending", + env: []string{"PATH=/usr/bin", "VIP_TEST_USERNAME=old", "VIP_TEST_PASSWORD=old"}, + testUser: "newuser", + testPass: "newpass", + }, + { + name: "clean env appends credentials", + env: []string{"PATH=/usr/bin"}, + testUser: "alice", + testPass: "s3cr3t", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := buildLocalEnv(tt.env, tt.testUser, tt.testPass) + if tt.wantErr { + if err == nil { + t.Fatal("expected error, got nil") + } + 
return + } + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Credentials must appear exactly once each. + usernameCount, passwordCount := 0, 0 + for _, e := range got { + if strings.HasPrefix(e, "VIP_TEST_USERNAME=") { + usernameCount++ + } + if strings.HasPrefix(e, "VIP_TEST_PASSWORD=") { + passwordCount++ + } + } + if usernameCount != 1 { + t.Errorf("VIP_TEST_USERNAME appears %d times, want 1", usernameCount) + } + if passwordCount != 1 { + t.Errorf("VIP_TEST_PASSWORD appears %d times, want 1", passwordCount) + } + + // Verify the appended values are correct. + wantUser := "VIP_TEST_USERNAME=" + tt.testUser + wantPass := "VIP_TEST_PASSWORD=" + tt.testPass + if !strings.Contains(strings.Join(got, "\n"), wantUser) { + t.Errorf("expected %q in env", wantUser) + } + if !strings.Contains(strings.Join(got, "\n"), wantPass) { + t.Errorf("expected %q in env", wantPass) + } + }) + } +} + +func TestParseJobStatus(t *testing.T) { + tests := []struct { + name string + output string + wantDone bool + wantSuccess bool + }{ + { + name: "job completed successfully", + output: "True,", + wantDone: true, + wantSuccess: true, + }, + { + name: "job failed - only Failed condition present (was false-positive before fix)", + output: ",True", + wantDone: true, + wantSuccess: false, + }, + { + name: "job failed - both conditions present", + output: "False,True", + wantDone: true, + wantSuccess: false, + }, + { + name: "both conditions set - complete wins", + output: "True,True", + wantDone: true, + wantSuccess: true, + }, + { + name: "job still running (no conditions yet)", + output: "", + wantDone: false, + wantSuccess: false, + }, + { + name: "job still running (False conditions)", + output: "False,False", + wantDone: false, + wantSuccess: false, + }, + { + name: "whitespace only", + output: " \n ", + wantDone: false, + wantSuccess: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotDone, gotSuccess := parseJobStatus(tt.output) 
+ if gotDone != tt.wantDone || gotSuccess != tt.wantSuccess { + t.Errorf("parseJobStatus(%q) = (%v, %v), want (%v, %v)", + tt.output, gotDone, gotSuccess, tt.wantDone, tt.wantSuccess) + } + }) + } +} + diff --git a/cmd/verify.go b/cmd/verify.go new file mode 100644 index 0000000..a75d39c --- /dev/null +++ b/cmd/verify.go @@ -0,0 +1,297 @@ +package main + +import ( + "context" + "fmt" + "log/slog" + "os" + "os/exec" + "path" + "strings" + "time" + + "github.com/posit-dev/ptd/cmd/internal" + "github.com/posit-dev/ptd/cmd/internal/legacy" + "github.com/posit-dev/ptd/cmd/internal/verify" + "github.com/posit-dev/ptd/lib/kube" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" +) + +func init() { + rootCmd.AddCommand(verifyCmd) + verifyCmd.AddCommand(cleanupCmd) + + verifyCmd.Flags().StringVar(&verifySiteName, "site", "main", "Name of the Site CR to verify") + verifyCmd.Flags().StringVar(&verifyCategories, "categories", "", "Test categories to run (pytest -m marker)") + verifyCmd.Flags().BoolVar(&verifyLocal, "local", false, "Run tests locally instead of in Kubernetes") + verifyCmd.Flags().BoolVar(&verifyConfigOnly, "config-only", false, "Generate config only, don't run tests") + verifyCmd.Flags().StringVar(&verifyImage, "image", "ghcr.io/posit-dev/vip:latest", "VIP container image to use") + verifyCmd.Flags().StringVar(&verifyKeycloakURL, "keycloak-url", "", "Keycloak URL (defaults to https://key. 
from Site CR)") + verifyCmd.Flags().StringVar(&verifyRealm, "realm", "posit", "Keycloak realm name") + verifyCmd.Flags().StringVar(&verifyTestUsername, "test-username", "vip-test-user", "Keycloak test user name") + verifyCmd.Flags().StringVar(&verifyNamespace, "namespace", "posit-team", "Kubernetes namespace where PTD resources live") + verifyCmd.Flags().StringVar(&verifyKeycloakAdminSecret, "keycloak-admin-secret", "", "Name of the K8s secret holding Keycloak admin credentials (defaults to {site}-keycloak-initial-admin)") + verifyCmd.Flags().BoolVar(&verifyInteractiveAuth, "interactive-auth", false, "Mint credentials via interactive browser login (requires VIP CLI)") + verifyCmd.Flags().DurationVar(&verifyTimeout, "timeout", 15*time.Minute, "Timeout for waiting for the VIP verification Job to complete") +} + +var ( + verifySiteName string + verifyCategories string + verifyLocal bool + verifyConfigOnly bool + verifyImage string + verifyKeycloakURL string + verifyRealm string + verifyTestUsername string + verifyNamespace string + verifyKeycloakAdminSecret string + verifyInteractiveAuth bool + verifyTimeout time.Duration +) + +var verifyCmd = &cobra.Command{ + Use: "verify ", + Short: "Verify a PTD deployment with VIP tests", + Long: `Verify a PTD deployment by running VIP (Verified Installation of Posit) tests. + +This command: +1. Fetches the Site CR from Kubernetes +2. Generates a VIP configuration from the Site CR +3. Ensures a test user exists in Keycloak +4. 
Runs VIP tests either locally or as a Kubernetes Job + +Examples: + # Run all VIP tests against ganso01-staging + ptd verify ganso01-staging + + # Run only smoke tests + ptd verify ganso01-staging --categories smoke + + # Generate config only without running tests + ptd verify ganso01-staging --config-only + + # Run tests locally instead of in Kubernetes + ptd verify ganso01-staging --local + + # Verify a specific site (for multi-site deployments) + ptd verify ganso01-staging --site secondary`, + Args: cobra.ExactArgs(1), + ValidArgsFunction: legacy.ValidTargetArgs, + Run: func(cmd *cobra.Command, args []string) { + target := args[0] + runVerify(cmd.Context(), cmd, target) + }, +} + +var cleanupCmd = &cobra.Command{ + Use: "cleanup <target>", + Short: "Delete VIP test credentials and resources", + Long: `Delete VIP test credentials and resources created by the verify command. + +This command: +1. Reads the vip-test-credentials K8s Secret +2. Deletes the Connect API key via the Connect API +3. Deletes the vip-test-credentials K8s Secret + +Examples: + # Clean up test credentials for ganso01-staging + ptd verify cleanup ganso01-staging + + # Clean up test credentials for a specific site + ptd verify cleanup ganso01-staging --site secondary`, + Args: cobra.ExactArgs(1), + ValidArgsFunction: legacy.ValidTargetArgs, + Run: func(cmd *cobra.Command, args []string) { + target := args[0] + runCleanup(cmd.Context(), cmd, target) + }, +} + +func runCleanup(ctx context.Context, cmd *cobra.Command, target string) { + // Load target configuration + t, err := legacy.TargetFromName(target) + if err != nil { + slog.Error("Could not load target", "error", err) + os.Exit(1) + } + + // Get credentials + creds, err := t.Credentials(ctx) + if err != nil { + slog.Error("Failed to get credentials", "error", err) + os.Exit(1) + } + + credEnvVars := creds.EnvVars() + + // Start proxy if needed (non-fatal) + proxyFile := path.Join(internal.DataDir(), "proxy.json") + stopProxy, err := 
kube.StartProxy(ctx, t, proxyFile) + if err != nil { + slog.Warn("Failed to start proxy", "error", err) + } else { + defer stopProxy() + } + + // Set up kubeconfig + kubeconfigPath, err := kube.SetupKubeConfig(ctx, t, creds) + if err != nil { + slog.Error("Failed to setup kubeconfig", "error", err) + os.Exit(1) + } + + // Prepare environment variables for kubectl + keysToSet := make(map[string]bool, len(credEnvVars)+1) + for k := range credEnvVars { + keysToSet[k] = true + } + keysToSet["KUBECONFIG"] = true + + base := os.Environ() + env := make([]string, 0, len(base)+len(keysToSet)) + for _, e := range base { + if idx := strings.Index(e, "="); idx >= 0 { + if !keysToSet[e[:idx]] { + env = append(env, e) + } + } else { + env = append(env, e) + } + } + for k, v := range credEnvVars { + env = append(env, k+"="+v) + } + env = append(env, "KUBECONFIG="+kubeconfigPath) + + // Get Site CR to fetch Connect URL + slog.Info("Fetching Site CR from Kubernetes") + cmd2 := exec.CommandContext(ctx, "kubectl", "get", "site", verifySiteName, + "-n", verifyNamespace, + "-o", "yaml") + cmd2.Env = env + + siteYAML, err := cmd2.Output() + if err != nil { + slog.Error("Failed to get Site CR", "error", err) + os.Exit(1) + } + + var site verify.SiteCR + if err := yaml.Unmarshal(siteYAML, &site); err != nil { + slog.Error("Failed to parse Site CR", "error", err) + os.Exit(1) + } + + // Get Connect URL + connectURL := "" + if site.Spec.Connect != nil { + connectURL = fmt.Sprintf("https://connect.%s", site.Spec.Domain) + if site.Spec.Connect.DomainPrefix != "" { + connectURL = fmt.Sprintf("https://%s.%s", site.Spec.Connect.DomainPrefix, site.Spec.Domain) + } + if site.Spec.Connect.BaseDomain != "" { + prefix := "connect" + if site.Spec.Connect.DomainPrefix != "" { + prefix = site.Spec.Connect.DomainPrefix + } + connectURL = fmt.Sprintf("https://%s.%s", prefix, site.Spec.Connect.BaseDomain) + } + } + + // Run cleanup + slog.Info("Cleaning up VIP test credentials") + if err := 
verify.CleanupCredentials(ctx, env, verifyNamespace, connectURL); err != nil { + slog.Error("Cleanup failed", "error", err) + os.Exit(1) + } + + fmt.Println("\nCleanup completed successfully") +} + +func runVerify(ctx context.Context, cmd *cobra.Command, target string) { + // Load target configuration + t, err := legacy.TargetFromName(target) + if err != nil { + slog.Error("Could not load target", "error", err) + os.Exit(1) + } + + // Get credentials + creds, err := t.Credentials(ctx) + if err != nil { + slog.Error("Failed to get credentials", "error", err) + os.Exit(1) + } + + credEnvVars := creds.EnvVars() + + // Start proxy if needed (non-fatal) + proxyFile := path.Join(internal.DataDir(), "proxy.json") + stopProxy, err := kube.StartProxy(ctx, t, proxyFile) + if err != nil { + slog.Warn("Failed to start proxy", "error", err) + } else { + defer stopProxy() + } + + // Set up kubeconfig + kubeconfigPath, err := kube.SetupKubeConfig(ctx, t, creds) + if err != nil { + slog.Error("Failed to setup kubeconfig", "error", err) + os.Exit(1) + } + + if strings.HasSuffix(verifyImage, ":latest") { + slog.Warn("Using ':latest' image tag is non-deterministic; consider pinning a specific version", "image", verifyImage) + } + + // Prepare environment variables for kubectl (inherit from current env). + // Deduplicate: strip any existing occurrences of keys we're about to set so + // that tools consistently see the intended value regardless of lookup order. 
+ keysToSet := make(map[string]bool, len(credEnvVars)+1) + for k := range credEnvVars { + keysToSet[k] = true + } + keysToSet["KUBECONFIG"] = true + + base := os.Environ() + env := make([]string, 0, len(base)+len(keysToSet)) + for _, e := range base { + if idx := strings.Index(e, "="); idx >= 0 { + if !keysToSet[e[:idx]] { + env = append(env, e) + } + } else { + env = append(env, e) + } + } + for k, v := range credEnvVars { + env = append(env, k+"="+v) + } + env = append(env, "KUBECONFIG="+kubeconfigPath) + + // Run verification + opts := verify.Options{ + Target: target, + SiteName: verifySiteName, + Namespace: verifyNamespace, + Categories: verifyCategories, + LocalMode: verifyLocal, + ConfigOnly: verifyConfigOnly, + Image: verifyImage, + KeycloakURL: verifyKeycloakURL, + Realm: verifyRealm, + TestUsername: verifyTestUsername, + KeycloakAdminSecret: verifyKeycloakAdminSecret, + InteractiveAuth: verifyInteractiveAuth, + Timeout: verifyTimeout, + Env: env, + } + + if err := verify.Run(ctx, opts); err != nil { + slog.Error("Verification failed", "error", err) + os.Exit(1) + } +} diff --git a/docs/cli/PTD_CLI_REFERENCE.md b/docs/cli/PTD_CLI_REFERENCE.md index 7304fea..6ed3168 100644 --- a/docs/cli/PTD_CLI_REFERENCE.md +++ b/docs/cli/PTD_CLI_REFERENCE.md @@ -344,6 +344,47 @@ ptd ensure testing01-staging --dry-run --- +### `ptd verify` + +Run VIP (Verified Installation of Posit) tests against a deployment to validate that products are functioning correctly. See [verify.md](verify.md) for full documentation including authentication modes. 
+ +**Usage:** +```bash +ptd verify <target> [flags] +``` + +**Flags:** + +| Flag | Type | Default | Description | +|------|------|---------|-------------| +| `--site` | string | `main` | Name of the Site CR to verify | +| `--categories` | string | (all) | Test categories to run (pytest `-m` marker) | +| `--local` | bool | false | Run tests locally instead of as a K8s Job | +| `--config-only` | bool | false | Generate and print `vip.toml` without running tests | +| `--image` | string | `ghcr.io/posit-dev/vip:latest` | VIP container image for K8s Job mode | +| `--keycloak-url` | string | (derived) | Override Keycloak URL | +| `--realm` | string | `posit` | Keycloak realm name | +| `--test-username` | string | `vip-test-user` | Keycloak test user name | +| `--namespace` | string | `posit-team` | Kubernetes namespace where PTD resources live | +| `--keycloak-admin-secret` | string | `{site}-keycloak-initial-admin` | K8s secret holding Keycloak admin credentials | +| `--interactive-auth` | bool | false | Mint credentials via interactive browser login (requires `--local`) | +| `--timeout` | duration | `15m` | Timeout for waiting for the VIP verification Job to complete | + +**Examples:** +```bash +# Run all tests as a K8s Job +ptd verify ganso01-staging + +# Generate config only +ptd verify ganso01-staging --config-only + +# Run locally with interactive browser auth (for Okta deployments) +ptd verify ganso01-staging --local --interactive-auth + +# Run specific test categories +ptd verify ganso01-staging --categories prerequisites +``` + +**Implementation:** `/cmd/verify.go`, `/cmd/internal/verify/` + +--- + +### `ptd workon` + +Start an interactive shell or run a one-shot command with credentials, kubeconfig, and environment configured for a target. Optionally, work within a specific Pulumi stack directory. diff --git a/docs/cli/verify.md b/docs/cli/verify.md new file mode 100644 index 0000000..a0d4886 --- /dev/null +++ b/docs/cli/verify.md @@ -0,0 +1,93 @@ +# ptd verify + +Run VIP (Verified Installation of Posit) tests against a PTD deployment to validate that products are installed correctly and functioning. + +## Usage + +```bash +ptd verify <target> [flags] +``` + +## How it works + +1. Fetches the Site CR from the target cluster +2. Generates a `vip.toml` configuration from the Site CR (product URLs, auth provider) +3. Provisions a test user in Keycloak (if Keycloak is configured) +4. 
Runs VIP tests either as a Kubernetes Job (default) or locally +5. Streams test output and exits with an appropriate code + +## Flags + +| Flag | Default | Description | +|------|---------|-------------| +| `--site` | `main` | Name of the Site CR to verify | +| `--categories` | (all) | Test categories to run (pytest `-m` marker) | +| `--local` | `false` | Run tests locally instead of as a K8s Job | +| `--config-only` | `false` | Generate and print `vip.toml` without running tests | +| `--image` | `ghcr.io/posit-dev/vip:latest` | VIP container image for K8s Job mode | +| `--keycloak-url` | (derived from Site CR) | Override Keycloak URL | +| `--realm` | `posit` | Keycloak realm name | +| `--test-username` | `vip-test-user` | Keycloak test user name | +| `--namespace` | `posit-team` | Kubernetes namespace where PTD resources live | +| `--keycloak-admin-secret` | `{site}-keycloak-initial-admin` | K8s secret holding Keycloak admin credentials | +| `--interactive-auth` | `false` | Mint credentials via interactive browser login (requires `--local`) | +| `--timeout` | `15m` | Timeout for waiting for the VIP verification Job | + +## Examples + +```bash +# Run all VIP tests (K8s Job mode) +ptd verify ganso01-staging + +# Run only prerequisite checks +ptd verify ganso01-staging --categories prerequisites + +# Generate config to inspect without running tests +ptd verify ganso01-staging --config-only + +# Run locally (requires VIP + Python installed) +ptd verify ganso01-staging --local + +# Verify a non-default site +ptd verify ganso01-staging --site secondary +``` + +## Authentication modes + +VIP tests require authenticated access to Connect and Workbench. How credentials are provided depends on the deployment's identity provider. + +### Keycloak deployments (automatic) + +When the Site CR has Keycloak enabled, `ptd verify` automatically: + +1. Reads Keycloak admin credentials from the `{site}-keycloak-initial-admin` Secret +2. Creates a test user via the Keycloak Admin API +3. Stores credentials in a `vip-test-credentials` Secret +4. Passes credentials to VIP via environment variables + +Subsequent runs skip user creation if the Secret already exists. + +### Okta / external IdP deployments (interactive) + +Deployments using external identity providers (Okta, Azure AD, etc.) cannot provision test users programmatically. 
Use **local mode with interactive auth**: + +```bash +ptd verify ganso01-staging --local --interactive-auth +``` + +This launches a visible browser window where you authenticate through the IdP's login flow. After login, VIP captures the session state and runs the remaining tests headlessly. + +> **Note**: `--interactive-auth` requires `--local` mode. It cannot be used with the K8s Job mode since there is no browser available in-cluster. + +For automated/CI verification of Okta deployments, you can pre-create a `vip-test-credentials` Secret manually: + +```bash +kubectl create secret generic vip-test-credentials \ + --from-literal=username=<username> \ + --from-literal=password=<password> \ + -n posit-team +``` + +### Summary + +| Mode | Auth method | Use case | +|------|-------------|----------| +| K8s Job (default) | Programmatic (Keycloak or pre-existing Secret) | CI, automated checks | +| `--local --interactive-auth` | Browser popup (Okta, Azure AD) | Developer validation | +| `--local` | Pre-existing Secret or env vars | Local automated runs |