generated from datum-cloud/service-template
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathTaskfile.yaml
More file actions
685 lines (591 loc) Β· 29.7 KB
/
Taskfile.yaml
File metadata and controls
685 lines (591 loc) Β· 29.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
# Taskfile for the Datum Cloud search service (https://taskfile.dev).
version: '3'

includes:
  # Documentation tasks
  docs:
    taskfile: ./docs/Taskfile.yaml
    dir: ./docs

  # Test infra tasks
  #
  # Must set TASK_X_REMOTE_TASKFILES=1 to use this feature.
  # See: https://taskfile.dev/experiments/remote-taskfiles
  test-infra:
    taskfile: https://raw.githubusercontent.com/datum-cloud/test-infra/{{.TEST_INFRA_REPO_REF}}/Taskfile.yml
    checksum: a1cf6063def6ee21ba42f8a0818127c92a9a5c313c293387f2294e42480dd3d5
    vars:
      REPO_REF: "{{.TEST_INFRA_REPO_REF}}"
      WAIT_TIMEOUT: "{{.WAIT_TIMEOUT}}"

vars:
  # Default timeout when waiting on cluster resources.
  WAIT_TIMEOUT: "10m"
  # Directory where locally built binaries and Go tools are installed.
  TOOL_DIR: "{{.USER_WORKING_DIR}}/bin"
  # Container image configuration
  IMAGE_NAME: "ghcr.io/datum-cloud/search"
  IMAGE_TAG: "dev"
  TEST_INFRA_CLUSTER_NAME: "test-infra"
  # Test infra repository configuration - can be overridden with environment variable
  TEST_INFRA_REPO_REF: 'v0.6.0'
  # renovate: datasource=go depName=github.com/kyverno/chainsaw
  CHAINSAW_VERSION: v0.2.13
  # renovate: datasource=go depName=sigs.k8s.io/controller-tools
  CONTROLLER_TOOLS_VERSION: v0.18.0
  # Local development certificate directory
  CERTS_DIR: "{{.USER_WORKING_DIR}}/.certs"
tasks:
  default:
    desc: List all available tasks
    silent: true
    cmds:
      - task --list

  # Build tasks
  build:
    desc: Build the search binary
    silent: true
    cmds:
      - |
        set -e
        echo "Building search..."
        mkdir -p {{.TOOL_DIR}}

        # Get git information for version injection
        GIT_COMMIT=$(git rev-parse HEAD 2>/dev/null || echo "unknown")
        VERSION="v0.0.0-dev+${GIT_COMMIT:0:7}"
        GIT_TREE_STATE="clean"
        if [ -n "$(git status --porcelain 2>/dev/null)" ]; then
          GIT_TREE_STATE="dirty"
        fi
        BUILD_DATE=$(date -u '+%Y-%m-%dT%H:%M:%SZ' 2>/dev/null || echo "unknown")

        echo "Version: ${VERSION}, Commit: ${GIT_COMMIT:0:7}, Tree: ${GIT_TREE_STATE}"

        go build \
          -ldflags="-X 'go.miloapis.net/search/internal/version.Version=${VERSION}' \
            -X 'go.miloapis.net/search/internal/version.GitCommit=${GIT_COMMIT}' \
            -X 'go.miloapis.net/search/internal/version.GitTreeState=${GIT_TREE_STATE}' \
            -X 'go.miloapis.net/search/internal/version.BuildDate=${BUILD_DATE}'" \
          -o {{.TOOL_DIR}}/search ./cmd/search

        echo "✅ Binary built: {{.TOOL_DIR}}/search"
# Development tasks
dev:build:
desc: Build the Search server container image for development
silent: true
cmds:
- |
set -e
echo "Building Search server container image: {{.IMAGE_NAME}}:{{.IMAGE_TAG}}"
# Get git information for version injection
GIT_COMMIT=$(git rev-parse HEAD 2>/dev/null || echo "unknown")
VERSION="v0.0.0-dev+${GIT_COMMIT:0:7}"
GIT_TREE_STATE="clean"
if [ -n "$(git status --porcelain 2>/dev/null)" ]; then
GIT_TREE_STATE="dirty"
fi
BUILD_DATE=$(date -u '+%Y-%m-%dT%H:%M:%SZ' 2>/dev/null || echo "unknown")
docker build \
--build-arg VERSION="${VERSION}" \
--build-arg GIT_COMMIT="${GIT_COMMIT}" \
--build-arg GIT_TREE_STATE="${GIT_TREE_STATE}" \
--build-arg BUILD_DATE="${BUILD_DATE}" \
-t {{.IMAGE_NAME}}:{{.IMAGE_TAG}} \
.
echo "β
Container image built: {{.IMAGE_NAME}}:{{.IMAGE_TAG}}"
dev:load:
desc: Load the Search container image into the kind cluster
silent: true
cmds:
- |
set -e
echo "Loading image {{.IMAGE_NAME}}:{{.IMAGE_TAG}} into kind cluster '{{.TEST_INFRA_CLUSTER_NAME}}'..."
kind load docker-image "{{.IMAGE_NAME}}:{{.IMAGE_TAG}}" --name "{{.TEST_INFRA_CLUSTER_NAME}}"
echo "Successfully loaded image into kind cluster"
# Code generation tasks
generate:
desc: Generate deepcopy and client code
deps:
- task: docs:generate
- task: install-go-tool
vars:
NAME: controller-gen
PACKAGE: sigs.k8s.io/controller-tools/cmd/controller-gen
VERSION: "{{.CONTROLLER_TOOLS_VERSION}}"
cmds:
- echo "Generating deepcopy and object files..."
- "\"{{.TOOL_DIR}}/controller-gen\" object paths=\"./pkg/apis/...\""
# Generate RBAC rules for the controllers.
- echo "Generating RBAC rules for the controllers..."
- "\"{{.TOOL_DIR}}/controller-gen\" rbac:roleName=milo-controller-manager paths=\"./internal/controllers/...\" output:dir=\"./config/overlays/controller-manager/core-control-plane/rbac\""
- task: generate:openapi
silent: true
generate:openapi:
desc: Generate OpenAPI definitions for search API types
deps:
- task: install-go-tool
vars:
NAME: openapi-gen
PACKAGE: k8s.io/code-generator/cmd/openapi-gen
VERSION: v0.23.0
cmds:
- echo "Generating OpenAPI definitions..."
- |
set -e
# Packages to generate OpenAPI for
PACKAGES=(
"pkg/apis/search/v1alpha1"
)
for REL_DIR in "${PACKAGES[@]}"; do
PKG="go.miloapis.net/search/$REL_DIR"
echo "Generating OpenAPI for $PKG..."
"{{.TOOL_DIR}}/openapi-gen" \
--input-dirs "$PKG,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/version" \
--output-package "$REL_DIR" \
--output-base "." \
--output-file-base "zz_generated.openapi" \
--go-header-file "hack/boilerplate.go.txt"
done
silent: true
# Test tasks
test:
desc: Run unit tests
cmds:
- go test -v ./...
# Cleanup tasks
clean:
desc: Clean build artifacts
cmds:
- rm -rf {{.TOOL_DIR}}
- rm -rf .task
- echo "β
Cleaned build artifacts"
silent: true
# Format and lint
fmt:
desc: Format Go code
cmds:
- go fmt ./...
- echo "β
Code formatted"
silent: true
vet:
desc: Run go vet
cmds:
- go vet ./...
- echo "β
Vet complete"
silent: true
# Architecture diagram tasks
diagrams:
desc: Generate architecture diagrams from PlantUML
cmds:
- task: docs:diagrams
silent: true
dev:setup:
silent: true
cmds:
- task: test-infra:cluster-up
- task: test-infra:install-observability
- task: dev:install-dependencies
- task: dev:build
- task: dev:load
- task: dev:deploy
ci:setup:
desc: Setup a CI-like environment (no observability)
silent: true
cmds:
- task: test-infra:cluster-up
- echo "β οΈ Skipping observability installation (CI mode)"
- task: dev:install-dependencies
vars:
NATS_CONFIG_DIR: config/dependencies/nats-ci
- task: dev:build
- task: dev:load
- task: dev:deploy
vars:
OVERLAY_DIR: config/overlays/ci
deploy:ci:
cmds:
- task: dev:deploy
vars:
OVERLAY_DIR: config/overlays/ci
dev:install-dependencies:
desc: Install all infrastructure dependencies (NATS)
silent: true
cmds:
- |
set -e
echo "π¦ Installing infrastructure dependencies..."
echo ""
# ============================================================
# Install Etcd
# ============================================================
echo "π¦ Installing Etcd..."
echo "Applying Etcd resources..."
task test-infra:kubectl -- apply -k config/dependencies/etcd
echo "Waiting for Etcd namespace to be created..."
task test-infra:kubectl -- wait --for=jsonpath='{.status.phase}'=Active namespace/etcd-system --timeout=1000s 2>/dev/null || echo "β οΈ Namespace not ready yet"
echo "Waiting for Etcd HelmRelease to be ready..."
task test-infra:kubectl -- wait --for=condition=ready helmrelease/etcd -n etcd-system --timeout=1000s 2>/dev/null || echo "β οΈ Etcd HelmRelease not ready yet"
echo "Waiting for Etcd pods to be ready..."
task test-infra:kubectl -- wait --for=condition=ready pod -l app.kubernetes.io/name=etcd -n etcd-system --timeout=1000s 2>/dev/null || echo "β οΈ Etcd pods not ready yet"
echo "β
Etcd installed"
echo ""
# ============================================================
# Install NATS
# ============================================================
echo "π¦ Installing NATS for event streaming..."
echo "Applying NATS resources..."
task test-infra:kubectl -- apply -k {{.NATS_CONFIG_DIR | default "config/dependencies/nats"}}
echo "Waiting for NATS namespace to be created..."
task test-infra:kubectl -- wait --for=jsonpath='{.status.phase}'=Active namespace/nats-system --timeout=1000s 2>/dev/null || echo "β οΈ Namespace not ready yet"
echo "Waiting for NATS HelmRelease to be ready..."
task test-infra:kubectl -- wait --for=condition=ready helmrelease/nats -n nats-system --timeout=1000s 2>/dev/null || echo "β οΈ NATS HelmRelease not ready yet (may need Flux installed)"
echo "Waiting for NACK HelmRelease to be ready..."
task test-infra:kubectl -- wait --for=condition=ready helmrelease/nack -n nats-system --timeout=1000s 2>/dev/null || echo "β οΈ NACK HelmRelease not ready yet"
echo "Waiting for NATS pods to be ready..."
task test-infra:kubectl -- wait --for=condition=ready pod -l app.kubernetes.io/name=nats -n nats-system --timeout=1000s 2>/dev/null || echo "β οΈ NATS pods not ready yet"
echo "β
NATS installed"
echo ""
# ============================================================
# Install Meilisearch
# ============================================================
echo "π¦ Installing Meilisearch..."
echo "Applying Meilisearch resources..."
task test-infra:kubectl -- apply -k config/dependencies/meilisearch
echo "Waiting for Meilisearch namespace to be created..."
task test-infra:kubectl -- wait --for=jsonpath='{.status.phase}'=Active namespace/meilisearch-system --timeout=1000s 2>/dev/null || echo "β οΈ Namespace not ready yet"
echo "Waiting for Meilisearch HelmRelease to be ready..."
task test-infra:kubectl -- wait --for=condition=ready helmrelease/meilisearch -n meilisearch-system --timeout=1000s 2>/dev/null || echo "β οΈ Meilisearch HelmRelease not ready yet"
echo "Waiting for Meilisearch pods to be ready..."
task test-infra:kubectl -- wait --for=condition=ready pod -l app.kubernetes.io/name=meilisearch -n meilisearch-system --timeout=1000s 2>/dev/null || echo "β οΈ Meilisearch pods not ready yet"
echo "β
Meilisearch installed"
echo ""
# Note: NACK controller is now installed as part of NATS dependencies kustomization above
# Stream configurations will be deployed as part of dev:deploy step
# ============================================================
# Summary
# ============================================================
echo "β
All infrastructure dependencies installed successfully!"
echo ""
echo "π Check status:"
echo " NATS: task test-infra:kubectl -- get pods -n nats-system"
echo " NACK controller: task test-infra:kubectl -- get pods -n nats-system -l app.kubernetes.io/name=nack"
echo " Meilisearch: task test-infra:kubectl -- get pods -n meilisearch-system"
echo ""
echo "π HelmRelease status:"
echo " task test-infra:kubectl -- get helmrelease -n nats-system"
echo " task test-infra:kubectl -- get helmrelease -n meilisearch-system"
echo ""
echo "Note: JetStream stream configurations and S3 bucket will be deployed with the Search server."
echo ""
dev:deploy:
desc: Deploy Search server to test-infra cluster
silent: true
cmds:
- |
set -e
echo "π Deploying Search server to test-infra cluster..."
# Check if deployment manifests exist
if [ ! -d "config" ]; then
echo "β οΈ Warning: config directory not found"
exit 1
fi
echo "π Deploying NATS stream configuration..."
task test-infra:kubectl -- apply -k config/components/nats-streams
echo "β³ Waiting for NATS stream to be ready..."
task test-infra:kubectl -- wait --for=condition=ready stream/audit-events -n nats-system --timeout=1000s 2>/dev/null || echo "β οΈ Stream not ready yet"
echo ""
echo "π Deploying Search server and components..."
task test-infra:kubectl -- apply -k {{.OVERLAY_DIR | default "config/overlays/dev"}}
echo "β³ Waiting for Search API Server to be ready..."
task test-infra:kubectl -- wait --for=condition=available deployment/search-apiserver -n search-system --timeout=1000s 2>/dev/null || echo "β οΈ Search API Server not ready yet"
echo "β³ Waiting for Search Controller Manager to be ready..."
task test-infra:kubectl -- wait --for=condition=available deployment/search-controller-manager -n search-system --timeout=1000s 2>/dev/null || echo "β οΈ Search Controller Manager not ready yet"
echo "β³ Waiting for Vector Sidecar to be ready..."
task test-infra:kubectl -- wait --for=condition=ready pod -l app.kubernetes.io/instance=vector-sidecar -n search-system --timeout=1000s 2>/dev/null || echo "β οΈ Vector Sidecar not ready yet"
echo "β³ Waiting for Vector Sidecar to be ready..."
task test-infra:kubectl -- wait --for=condition=ready helmrelease/vector-sidecar -n search-system --timeout=1000s 2>/dev/null || echo "β οΈ Vector Sidecar HelmRelease not ready yet"
task test-infra:kubectl -- wait --for=condition=ready pod -l app.kubernetes.io/instance=vector-sidecar -n search-system --timeout=1000s 2>/dev/null || echo "β οΈ Vector Sidecar pods not ready yet"
echo "β³ Waiting for Resource Indexer to be ready..."
task test-infra:kubectl -- wait --for=condition=available deployment/resource-indexer -n search-system --timeout=1000s 2>/dev/null || echo "β οΈ Resource Indexer not ready yet"
echo "β³ Waiting for NATS Consumer to be ready..."
task test-infra:kubectl -- wait --for=condition=ready consumer/search-indexer -n search-system --timeout=1000s 2>/dev/null || echo "β οΈ NATS Consumer not ready yet"
echo "β
Search server and all dependencies deployed successfully!"
echo ""
echo "π Check status:"
echo " All resources: task test-infra:kubectl -- get all -n search-system"
echo " Vector pods: task test-infra:kubectl -- get pods -l app.kubernetes.io/instance=vector-sidecar -n search-system"
echo " NATS pods: task test-infra:kubectl -- get pods -n nats-system"
echo " NATS streams: task test-infra:kubectl -- get streams -n nats-system"
echo " Meilisearch pods: task test-infra:kubectl -- get pods -n meilisearch-system"
echo " Etcd pods: task test-infra:kubectl -- get pods -n etcd-system"
echo " Search Server pods: task test-infra:kubectl -- get pods -n search-system"
echo ""
echo "π View logs:"
echo " Vector: task test-infra:kubectl -- logs -l app.kubernetes.io/instance=vector-sidecar -n search-system -f"
echo " NATS: task test-infra:kubectl -- logs -l app.kubernetes.io/name=nats -n nats-system -f"
echo " Meilisearch: task test-infra:kubectl -- logs -l app.kubernetes.io/name=meilisearch -n meilisearch-system -f"
echo " Etcd: task test-infra:kubectl -- logs -l app.kubernetes.io/name=etcd -n etcd-system -f"
echo " Search API Server: task test-infra:kubectl -- logs -l app.kubernetes.io/name=search-apiserver -n search-system -f"
echo " Search Controller: task test-infra:kubectl -- logs -l app.kubernetes.io/name=search-controller-manager -n search-system -f"
dev:generate-webhook-certs:
desc: Generate all certificates for webhook server
cmds:
- mkdir -p "{{.CERTS_DIR}}" && openssl req -x509 -nodes -newkey rsa:4096 -keyout "{{.CERTS_DIR}}/server.key" -out "{{.CERTS_DIR}}/server.crt" -days 1024 -subj "/CN=webhook.zitadel.svc" -addext "subjectAltName=DNS:localhost,DNS:host.docker.internal" -sha256
- |
CA_BUNDLE=$(cat {{.CERTS_DIR}}/server.crt | base64 | tr -d '\n')
export CA_BUNDLE
# Dynamically patch the generated manifest to use local host url and injected CA bundle
perl -0777 -pe 's/(\s*)clientConfig:\n\s+service:\n\s+name: webhook-service\n\s+namespace: system\n\s+path: (.*)/$1clientConfig:\n$1 url: https:\/\/host.docker.internal:9443$2\n$1 caBundle: $ENV{CA_BUNDLE}/' config/webhook/manifests.yaml | task test-infra:kubectl -- apply -f -
dev:run-controller:
desc: Run the controller manager against the LOCAL Search API server (127.0.0.1:9443)
cmds:
- |
# Generate a temporary kubeconfig pointing to localhost:9443
mkdir -p .tmp
kubectl config view --minify --raw | \
sed "s|server:.*|server: https://127.0.0.1:9443|g" | \
sed "s|certificate-authority-data:.*|insecure-skip-tls-verify: true|g" \
> .tmp/local-search-kubeconfig.yaml
- echo "Ensure you are running 'task dev:run-apiserver', 'task dev:pf-meilisearch' and 'task dev:pf-etcd' in another terminal!"
- echo "Scaling down the controller manager deployment"
- |
task test-infra:kubectl -- scale deployment -n search-system search-controller-manager --replicas=0
- |
echo "π Running controller against local Search API server..."
MEILISEARCH_API_KEY=search-master-key \
KUBECONFIG=.tmp/local-search-kubeconfig.yaml \
go run ./cmd/search controller-manager \
--metrics-bind-address=:8085 \
--health-probe-bind-address=:8086 \
--leader-elect=false \
--meilisearch-domain="http://127.0.0.1:7700"
silent: true
dev:run-indexer:
desc: Run the indexer against the LOCAL NATS (127.0.0.1:4222)
cmds:
- echo "Scaling down the indexer deployment"
- task test-infra:kubectl -- scale deployment -n search-system resource-indexer --replicas=0
- echo "Ensure you are running 'task dev:pf-nats' in another terminal!"
- |
# Trap to kill background processes (port-forward) on exit
trap 'kill $(jobs -p)' EXIT
echo "π Starting NATS port-forward in background..."
task dev:pf-nats > /dev/null 2>&1 &
echo "π Starting Meilisearch port-forward in background..."
task dev:pf-meilisearch > /dev/null 2>&1 &
echo "Waiting for port-forward to be ready..."
sleep 5
echo "π Running indexer against local NATS..."
MEILISEARCH_API_KEY=search-master-key \
go run ./cmd/search indexer \
--nats-url="nats://127.0.0.1:4222" \
--nats-subject="audit.>" \
--nats-queue-group="search-indexer" \
--nats-audit-consumer-name="search-indexer" \
--nats-reindex-consumer-name="search-reindexer" \
--nats-stream-name="AUDIT_EVENTS" \
--meilisearch-domain="http://127.0.0.1:7700"
silent: true
dev:publish-test-event:
desc: Publish a test event to the local NATS server (requires dev:pf-nats)
cmds:
- go run ./hack/publish_event.go
dev:pf-vectorsidecar:
desc: Port forward Vector sidecar for local development
cmds:
- echo "Port forwarding Vector sidecar to localhost:8080..."
- task test-infra:kubectl -- port-forward -n search-system svc/vector-sidecar 8080:8080
dev:pf-meilisearch:
desc: Port forward Meilisearch for local development
cmds:
- echo "Port forwarding Meilisearch to localhost:7700..."
- task test-infra:kubectl -- port-forward -n meilisearch-system svc/meilisearch 7700:7700
dev:pf-nats:
desc: Port forward NATS for local development
cmds:
- echo "Port forwarding NATS to localhost:4222..."
- task test-infra:kubectl -- port-forward -n nats-system svc/nats 4222:4222
dev:pf-etcd:
desc: Port forward Etcd for local development
cmds:
- echo "Port forwarding Etcd to localhost:2379..."
- task test-infra:kubectl -- port-forward -n etcd-system svc/etcd 2379:2379
dev:run-apiserver:
desc: Run the API server locally (requires dev:pf-etcd running)
cmds:
- |
# Ensure kubeconfig is up to date with the current Kind cluster port
echo "Syncing kubeconfig for cluster '{{.TEST_INFRA_CLUSTER_NAME}}'..."
kind export kubeconfig --name "{{.TEST_INFRA_CLUSTER_NAME}}"
- |
current_context=$(kubectl config current-context)
if [ "$current_context" != "kind-{{.TEST_INFRA_CLUSTER_NAME}}" ]; then
echo "β Error: Wrong context! You are in '$current_context', but must be in 'kind-{{.TEST_INFRA_CLUSTER_NAME}}'."
echo "Please run context switch command first."
exit 1
fi
- mkdir -p "{{.CERTS_DIR}}"
- |
# Extract Kind CA to allowed local kubectl to authenticate via client certs
kubectl config view --minify --raw -o jsonpath='{.clusters[0].cluster.certificate-authority-data}' | base64 -d > "{{.CERTS_DIR}}/kind-ca.crt"
- echo "Running Search API Server locally..."
- echo "Ensure you are running 'task dev:pf-etcd' in another terminal!"
- |
# Use KUBECONFIG if set, otherwise fallback to default
KCFG=${KUBECONFIG:-$HOME/.kube/config}
go run ./cmd/search serve \
--etcd-servers http://127.0.0.1:2379 \
--secure-port 9443 \
--bind-address 127.0.0.1 \
--authentication-skip-lookup=true \
--authentication-kubeconfig="$KCFG" \
--authorization-kubeconfig="$KCFG" \
--kubeconfig="$KCFG" \
--client-ca-file="{{.CERTS_DIR}}/kind-ca.crt" \
--authorization-always-allow-paths=/healthz,/readyz,/livez,/openapi,/openapi/v2,/openapi/v3,/apis,/api
dev:undeploy:
desc: Undeploy Search server from test-infra cluster
silent: true
cmds:
- |
set -e
echo "ποΈ Undeploying Search server and components..."
task test-infra:kubectl -- delete -k config/overlays/dev --ignore-not-found=true
echo "ποΈ Undeploying NATS stream configuration..."
task test-infra:kubectl -- delete -k config/components/nats-streams --ignore-not-found=true
echo "β
Search server and related components undeployed."
test:end-to-end:
desc: Run end-to-end tests using Chainsaw against the test-infra cluster. Pass directory names to run specific tests (e.g., 'task test:end-to-end -- audit-logging')
deps:
- task: install-go-tool
vars:
NAME: chainsaw
PACKAGE: github.com/kyverno/chainsaw
VERSION: "{{.CHAINSAW_VERSION}}"
cmds:
- |
set -e
echo "π§ͺ Running Chainsaw end-to-end tests against test-infra cluster..."
# Get kubeconfig for test-infra cluster
KUBECONFIG_PATH="${HOME}/.kube/config"
KUBE_CONTEXT="kind-{{.TEST_INFRA_CLUSTER_NAME}}"
# Verify connectivity to test-infra cluster
echo "Verifying connectivity to test-infra cluster..."
if ! kubectl --context "$KUBE_CONTEXT" get --raw /healthz &>/dev/null; then
echo "β Error: Cannot connect to test-infra cluster"
echo "Please ensure the test infrastructure is running with 'task dev:setup'"
echo "You can check the status with:"
echo " task test-infra:kubectl -- get pods -A"
exit 1
fi
echo "β
Successfully connected to test-infra cluster"
# Determine test paths based on CLI arguments
if [ -z "{{.CLI_ARGS}}" ]; then
# No arguments provided - run all tests
echo "No test directories specified - running all end-to-end tests..."
TEST_PATHS="test/"
else
# Arguments provided - construct test paths
echo "Running tests for specified directories: {{.CLI_ARGS}}"
TEST_PATHS=""
for dir in {{.CLI_ARGS}}; do
if [ -d "$dir" ]; then
# If argument is a valid path itself (e.g. test/infra/meilisearch), use it
TEST_PATHS="$TEST_PATHS $dir"
elif [ -d "test/$dir" ]; then
# If argument is a subdirectory name (e.g. infra/meilisearch), prepend test/
TEST_PATHS="$TEST_PATHS test/$dir"
else
echo "β οΈ Warning: Test directory '$dir' or 'test/$dir' does not exist, skipping..."
fi
done
# Check if we found any valid test directories
if [ -z "$TEST_PATHS" ]; then
echo "β Error: No valid test directories found for arguments: {{.CLI_ARGS}}"
echo "Available test directories:"
ls -1 test/ 2>/dev/null || echo " (none)"
exit 1
fi
fi
echo "π Test paths: $TEST_PATHS"
echo ""
# Run Chainsaw with the test-infra cluster context
"{{.TOOL_DIR}}/chainsaw" test $TEST_PATHS \
--kube-context "$KUBE_CONTEXT"
silent: true
install-go-tool:
desc: Install a Go tool to {{.TOOL_DIR}}/{{.NAME}} (symlinked from {{.TOOL_DIR}}/{{.NAME}}-{{.VERSION}})
silent: true
internal: true
# vars: - Variables that need to be set when depending on this task
# NAME:
# PACKAGE:
# VERSION:
cmds:
- mkdir -p {{.TOOL_DIR}}
- |
set -e
# Capture Taskfile vars into shell vars for clarity and safety in the script
_NAME="{{.NAME}}"
_PACKAGE="{{.PACKAGE}}"
_VERSION="{{.VERSION}}"
_TOOL_DIR="{{.TOOL_DIR}}"
_VERSIONED_TOOL_PATH="$_TOOL_DIR/$_NAME-$_VERSION" # e.g., ./bin/crdoc-v0.6.4
_SYMLINK_PATH="$_TOOL_DIR/$_NAME" # e.g., ./bin/crdoc (this is where go install puts it first)
# Check if the correctly versioned binary already exists
if [ ! -f "$_VERSIONED_TOOL_PATH" ]; then
echo "Downloading $_PACKAGE@$_VERSION (binary name: $_NAME) to $_VERSIONED_TOOL_PATH"
# Ensure the path where `go install` will place the binary (before mv) is clear.
# This is $_SYMLINK_PATH (e.g., ./bin/crdoc).
if [ -d "$_SYMLINK_PATH" ]; then
echo "Error: Target path $_SYMLINK_PATH for 'go install' is an existing directory. Please remove it manually."
exit 1
fi
# Remove if it's a file or symlink, to mimic `rm -f $(1)` from Makefile.
# This ensures 'go install' doesn't conflict with an existing symlink or wrong file.
echo "Preparing $_SYMLINK_PATH for new installation..."
rm -f "$_SYMLINK_PATH" || true
echo "Installing with GOBIN=$_TOOL_DIR..."
# 'go install' will place the executable (named $_NAME) into $_TOOL_DIR.
# This relies on $_NAME being the actual binary name derived from $_PACKAGE.
if ! GOBIN="$_TOOL_DIR" go install "$_PACKAGE@$_VERSION"; then
echo "Failed to 'go install $_PACKAGE@$_VERSION' with GOBIN=$_TOOL_DIR"
exit 1
fi
# After `go install`, the binary should be at $_SYMLINK_PATH (e.g. $_TOOL_DIR/$_NAME)
if [ ! -f "$_SYMLINK_PATH" ]; then
echo "Error: 'go install' did not produce $_SYMLINK_PATH"
# As a fallback, check if it was installed with the package basename if _NAME was different
_PKG_BASENAME=$(basename "$_PACKAGE")
if [ "$_PKG_BASENAME" != "$_NAME" ] && [ -f "$_TOOL_DIR/$_PKG_BASENAME" ]; then
echo "Found $_TOOL_DIR/$_PKG_BASENAME instead (package basename). Moving this one."
mv "$_TOOL_DIR/$_PKG_BASENAME" "$_VERSIONED_TOOL_PATH"
else
echo "Please ensure the NAME variable ('$_NAME') accurately matches the binary name produced by 'go install $_PACKAGE'."
exit 1
fi
else
# Binary $_SYMLINK_PATH was created as expected. Now move it to its versioned path.
echo "Moving installed binary from $_SYMLINK_PATH to $_VERSIONED_TOOL_PATH"
mv "$_SYMLINK_PATH" "$_VERSIONED_TOOL_PATH"
fi
# Create/update the symlink (e.g., ./bin/crdoc -> crdoc-v0.6.4)
# The target of the symlink is relative to _TOOL_DIR.
echo "Creating/updating symlink: $_SYMLINK_PATH -> $_NAME-$_VERSION (within $_TOOL_DIR)"
(cd "$_TOOL_DIR" && ln -sf "$_NAME-$_VERSION" "$_NAME")
echo "Tool $_NAME is now available at $_SYMLINK_PATH (points to $_VERSIONED_TOOL_PATH)"
fi
dev:redeploy:
desc: Quick rebuild and redeploy for development iterations
deps:
- dev:build
- dev:load
- dev:deploy
cmds:
- |
set -e
echo "Redeploying Search's apiserver..."
echo "Redeploying Search controller manager..."
# Restart the deployment to pick up new image
task test-infra:kubectl -- rollout restart deployment/search-apiserver -n search-system
task test-infra:kubectl -- rollout restart deployment/search-controller-manager -n search-system
# Wait for rollout to complete
echo "Waiting for rollout to complete..."
task test-infra:kubectl -- rollout status deployment/search-controller-manager -n search-system --timeout=1000s
echo "β
Redeployment complete!"
echo "Check logs with: task test-infra:kubectl -- logs -n search-system -l app.kubernetes.io/name=search-controller-manager"
dev:nats-queue:
desc: View the NATS queue for the indexer
cmds:
- task test-infra:kubectl -- exec -n nats-system $(task test-infra:kubectl -- get pods -n nats-system -l app.kubernetes.io/component=nats-box -o jsonpath="{.items[0].metadata.name}") -- nats consumer info AUDIT_EVENTS search-indexer