#!/usr/bin/env bash
# run the following command after SSHing into the podman machine
# sudo ln -s /Users/nurrony/.config/containers/registries.conf \
# /etc/containers/registries.conf.d/999-podman-desktop-registries-from-host.conf
alias mypodmachine="podman machine init --now --cpus 4 --disk-size 30 --memory 8192 --volume /Users:/Users --volume /private:/private --volume /var/folders:/var/folders --volume $DEV_ZONE:$DEV_ZONE --rootful"
alias docker='podman'
# prunes all unused podman containers, volumes, images and build cache. run with caution
dcleanup() {
podman container prune --force 2>/dev/null
podman volume prune --force 2>/dev/null
podman image prune --force --build-cache --all 2>/dev/null
}
# deletes the named container if it exists but is stopped
del_stopped() {
local name=$1
local state
state=$(podman inspect --format "{{.State.Running}}" "$name" 2>/dev/null)
if [[ "$state" == "false" ]]; then
podman rm "$name"
fi
}
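# e.g. del_stopped mysqlserver-8-0
# removes the named container only when its State.Running is "false"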
# bring up local container registry
ctr-registry-up() {
local state=$(podman inspect --format "{{.State.Running}}" registry 2>/dev/null)
# remove the old registry container whether it is running or stopped
if [[ -n "$state" ]]; then
podman rm -f registry
fi
podman compose -f ${DEV_ZONE_CONFIG_PATH}/kubernetes/clusters/docker-compose.yml up -d
echo 'registry started successfully...'
}
# bring down local container registry
ctr-registry-down() {
podman compose -f ${DEV_ZONE_CONFIG_PATH}/kubernetes/clusters/docker-compose.yml down
echo 'registry stopped successfully'
}
# checks whether the target container relies on others and starts any
# dependent containers before running the target container
relies_on() {
for container in "$@"; do
local state
state=$(podman inspect --format "{{.State.Running}}" "$container" 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "$container is not running, starting it for you."
$container
fi
done
}
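# e.g. relies_on nginx-proxy mysqlserver
# each argument must be both a container name and a shell function that starts it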
traefik_validate() {
podman container run --rm \
--volume "${DEV_ZONE_CONFIG_PATH}/traefik:/config:ro" \
traefik:v3 traefik check --providers.file.directory=/config/dynamic
}
traefik_safe_write() {
local TARGET="$1"
local TMP="${TARGET}.tmp"
local BACKUP="${TARGET}.bak"
cat >"$TMP" || return 1
# stage the candidate config in place, validate the whole dynamic directory,
# then roll back to the previous version if validation fails
[[ -f "$TARGET" ]] && cp "$TARGET" "$BACKUP"
mv "$TMP" "$TARGET"
if traefik_validate; then
rm -f "$BACKUP"
echo "Traefik config applied: $(basename "$TARGET")"
else
echo "Traefik config invalid, rejected"
if [[ -f "$BACKUP" ]]; then mv "$BACKUP" "$TARGET"; else rm -f "$TARGET"; fi
return 1
fi
}
traefik_config() {
local SERVICE_NAME="$1"
local SERVICE_PORT="$2"
if [[ -z "$SERVICE_NAME" || -z "$SERVICE_PORT" ]]; then
echo "usage: traefik_config <service> <port>"
return 1
fi
local HOST="${SERVICE_NAME}.${CONTAINER_FQDN:-localhost}"
local OUT_FILE="${DEV_ZONE_CONFIG_PATH}/traefik/dynamic/${SERVICE_NAME}.yml"
traefik_safe_write "$OUT_FILE" <<EOF
http:
routers:
${SERVICE_NAME}:
rule: "Host(\`${HOST}\`)"
entryPoints:
- websecure
service: ${SERVICE_NAME}
tls: {}
services:
${SERVICE_NAME}:
loadBalancer:
servers:
- url: "http://${SERVICE_NAME}:${SERVICE_PORT}"
passHostHeader: true
EOF
}
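# e.g. traefik_config whoami 8080
# writes ${DEV_ZONE_CONFIG_PATH}/traefik/dynamic/whoami.yml routing
# https://whoami.${CONTAINER_FQDN:-localhost} to http://whoami:8080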
traefik() {
local TRAEFIK_BASE="${DEV_ZONE_CONFIG_PATH}/traefik"
mkdir -p "${TRAEFIK_BASE}/certs" "${TRAEFIK_BASE}/dynamic"
# Start Podman API socket (required for podman provider)
# podman system service unix:///run/podman/podman.sock --time=0 >/dev/null 2>&1 &
if podman container exists traefik; then
echo "Traefik already running"
return
fi
podman container run -dit \
--name traefik \
--network "${DEV_CONTAINER_NETWORK_NAME}" \
--publish 80:80 \
--publish 443:443 \
--publish 8080:8080 \
--security-opt label=disable \
--volume /run/podman/podman.sock:/run/podman/podman.sock:ro \
--volume "${TRAEFIK_BASE}/certs:/certs:ro" \
--volume "${TRAEFIK_BASE}/dynamic:/dynamic:ro" \
traefik:v3 \
--log.level=INFO \
--api.dashboard=true \
--api.insecure=true \
\
--entrypoints.web.address=:80 \
--entrypoints.websecure.address=:443 \
--entrypoints.traefik.address=:8080 \
\
--providers.podman=true \
--providers.podman.endpoint=unix:///run/podman/podman.sock \
--providers.podman.exposedbydefault=false \
--providers.podman.network="${DEV_CONTAINER_NETWORK_NAME}" \
\
--providers.file.directory=/dynamic \
--providers.file.watch=true
}
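# opting a container into traefik (exposedbydefault=false above): a minimal
# sketch, assuming the podman provider reads docker-style traefik labels
#   podman container run -d --name whoami \
#     --network ${DEV_CONTAINER_NETWORK_NAME} \
#     --label traefik.enable=true \
#     --label 'traefik.http.routers.whoami.rule=Host(`whoami.localhost`)' \
#     traefik/whoami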
nginx-proxy() {
del_stopped nginx-proxy
local state=$(podman inspect --format "{{.State.Running}}" nginx-proxy 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
mkdir -p ${DEV_ZONE_CONFIG_PATH}/nginx/{certs,vhost.d,conf.d}
podman container run -it -d \
--privileged \
--publish 80:80 \
--publish 443:443 \
--name nginx-proxy \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--volume /run/podman/podman.sock:/tmp/docker.sock:ro \
--volume ${DEV_ZONE_CONFIG_PATH}/nginx/certs:/etc/nginx/certs:ro \
--volume ${DEV_ZONE_CONFIG_PATH}/nginx/vhost.d:/etc/nginx/vhost.d:ro \
--volume ${DEV_ZONE_CONFIG_PATH}/nginx/conf.d/custom_settings.conf:/etc/nginx/conf.d/custom_settings.conf:ro \
nginxproxy/nginx-proxy:alpine
else
echo 'proxy is already running'
fi
}
proxy-manager() {
del_stopped proxy-manager
local state=$(podman inspect --format "{{.State.Running}}" proxy-manager 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
mkdir -p ${DEV_ZONE_CONFIG_PATH}/nginx/manager/{data,letsencrypt}
podman container run -it --detach \
--privileged \
--publish 80:80 \
--publish 443:443 \
--publish 8888:81 \
--name proxy-manager \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--volume ${DEV_ZONE_CONFIG_PATH}/nginx/manager/data:/data:Z \
--volume ${DEV_ZONE_CONFIG_PATH}/nginx/manager/letsencrypt:/etc/letsencrypt:Z \
--env DB_SQLITE_FILE="/data/database.sqlite" \
jc21/nginx-proxy-manager:latest
else
echo 'proxy-manager is already running'
fi
}
portainer() {
relies_on nginx-proxy
del_stopped portainer
local state=$(podman inspect --format "{{.State.Running}}" portainer 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "portainer server is not running, starting it for you."
mkdir -p ${DEV_ZONE_CONFIG_PATH}/portainer
podman container run -it -d --privileged \
--volume /run/podman/podman.sock:/run/podman/podman.sock:ro \
--volume ${DEV_ZONE_CONFIG_PATH}/portainer:/data \
--env VIRTUAL_HOST=portainer.${CONTAINER_FQDN:-nurrony.localhost} \
--expose 9000 \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--name portainer \
portainer/portainer
else
echo 'portainer is already running'
fi
}
# creates an nginx config for a local route
nginx_config() {
local server=$1
local route=$2
cat >${DEV_ZONE_CONFIG_PATH}/nginx/conf.d/${server}.conf <<-EOF
upstream ${server} { server ${route}; }
server {
server_name ${server};
location / {
proxy_pass http://${server};
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host \$http_host;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_set_header X-Forwarded-For \$remote_addr;
proxy_set_header X-Forwarded-Port \$server_port;
proxy_set_header X-Request-Start \$msec;
}
}
EOF
# restart the proxy so it picks up the new vhost config
podman container restart nginx-proxy
# add host to /etc/hosts
hostess add $server 127.0.0.1
# open browser
open "http://${server}"
}
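# usage sketch (hypothetical names): nginx_config myapp.localhost myapp:3000
# writes conf.d/myapp.localhost.conf, restarts the proxy, points the host at
# 127.0.0.1 via hostess, and opens http://myapp.localhost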
mysqlserver() {
local VERSION=${1:-8.0}
local PORT=${2:-3306}
del_stopped mysqlserver-${VERSION//./-}
local state=$(podman inspect --format "{{.State.Running}}" mysqlserver-${VERSION//./-} 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "mysql $VERSION server is not running, starting it for you."
mkdir -p ${DEV_ZONE_CONFIG_PATH}/databases/mysql/${VERSION} &&
podman container run -it -d \
--publish $PORT:3306 \
--health-retries=5 \
--health-interval=15s \
--health-start-period=5s \
--health-cmd="mysqladmin ping -h localhost" \
--env MYSQL_ROOT_PASSWORD=nurrony \
--name mysqlserver-${VERSION//./-} \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--volume ${DEV_ZONE_CONFIG_PATH}/databases/mysql/${VERSION}:/var/lib/mysql \
mysql:${VERSION}
else
echo "mysql server $VERSION is already running"
fi
}
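# e.g. mysqlserver            # mysql 8.0 on port 3306
#      mysqlserver 8.4 3307   # a second version side by side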
nifiserver() {
local VERSION=${1:-latest}
local PORT=${2:-8443}
local VOLUME_NAME=nifi-volume-${VERSION//./-}
local NIFI_SHARED_DIR=${DEV_ZONE_CONFIG_PATH}/nifiserver/extensions
local NIFI_DATA_DIR=${DEV_ZONE_CONFIG_PATH}/nifiserver/data
del_stopped nifiserver-${VERSION//./-}
local state=$(podman inspect --format "{{.State.Running}}" nifiserver-${VERSION//./-} 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "nifiserver $VERSION server is not running, starting it for you."
mkdir -p ${DEV_ZONE_CONFIG_PATH}/nifiserver/${VERSION//./-}
# if ! podman volume inspect "${VOLUME_NAME}" >/dev/null 2>&1; then
# echo "inside volume block"
# mkdir -p ${DEV_ZONE_CONFIG_PATH}/nifiserver/${VERSION//./-} &&
# podman volume create --opt o=bind --driver local --opt type=none \
# --opt device=${DEV_ZONE_CONFIG_PATH}/nifiserver/${VERSION//./-} \
# ${VOLUME_NAME}
# fi
mkdir -p ${NIFI_SHARED_DIR} ${NIFI_DATA_DIR} &&
podman container run -it -d --privileged \
--env VIRTUAL_PORT=8080 \
--env NIFI_WEB_HTTP_PORT=8080 \
--name nifiserver-${VERSION//./-} \
--hostname nifiserver-${VERSION//./-} \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--env SINGLE_USER_CREDENTIALS_USERNAME=admin \
--env SINGLE_USER_CREDENTIALS_PASSWORD=NifiAdmin@121 \
--env VIRTUAL_HOST=nifi.${CONTAINER_FQDN:-nurrony.localhost} \
--volume ${NIFI_DATA_DIR}:/opt/data \
--volume ${NIFI_SHARED_DIR}:/opt/nifi/nifi-current/extensions \
--volume ${DEV_ZONE_CONFIG_PATH}/nifiserver/${VERSION//./-}:/opt/nifi \
--health-retries=5 \
--health-timeout=10s \
--health-interval=15s \
--health-start-period=120s \
--health-cmd="curl -f http://localhost:8080/nifi-api/system-diagnostics || exit 1" \
apache/nifi:${VERSION}
else
echo "nifi server $VERSION is already running"
fi
echo "Username: admin"
echo "Password: NifiAdmin@121"
echo "URL: https://nifi.${CONTAINER_FQDN:-nurrony.localhost}/nifi"
}
mysql() {
local RUNNING_DBSERVER_NAME=$(podman ps --filter "name=mysqlserver" --format "{{.Names}}")
relies_on $RUNNING_DBSERVER_NAME
podman container exec -it $RUNNING_DBSERVER_NAME mysql "$@"
}
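# e.g. mysql -uroot -pnurrony -e 'show databases;'
# execs the mysql client inside whichever mysqlserver-* container is running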
dynamodb() {
del_stopped dynamodb
relies_on nginx-proxy
sleep 2
local state=$(podman inspect --format "{{.State.Running}}" dynamodb 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "local dynamodb server is not running, starting it for you."
mkdir -p ${DEV_ZONE_CONFIG_PATH}/databases/dynamodb
podman container run -dit \
--volume ${DEV_ZONE_CONFIG_PATH}/databases/dynamodb:/home/dynamodblocal/data \
--expose 8000 \
--name dynamodb \
--workdir /home/dynamodblocal \
--env VIRTUAL_PORT=8000 \
--env VIRTUAL_HOST=dynamodb.${CONTAINER_FQDN:-nurrony.localhost} \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--health-retries=5 \
--health-timeout=5s \
--health-interval=30s \
--health-start-period=20s \
--health-cmd="aws dynamodb list-tables --endpoint-url http://localhost:8000 --region us-east-1 >/dev/null 2>&1 || exit 1" \
amazon/dynamodb-local -jar DynamoDBLocal.jar -sharedDb -optimizeDbBeforeStartup -dbPath ./data
if [ "$1" != "" ]; then
podman network connect $1 dynamodb
fi
else
echo 'local dynamodb server is already running'
fi
}
pma() {
del_stopped pma
relies_on nginx-proxy
sleep 2
local state=$(podman inspect --format "{{.State.Running}}" pma 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "phpMyAdmin is not running, starting it for you."
podman container run -it -d \
--expose 80 \
--name pma \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--env UPLOAD_LIMIT=100M \
--env PMA_ARBITRARY=1 \
--env VIRTUAL_HOST=pma.${CONTAINER_FQDN:-nurrony.localhost} \
--health-cmd='curl -fs http://127.0.0.1/ || exit 1' \
--health-interval=15s \
--health-timeout=5s \
--health-retries=5 \
--health-start-period=5s \
phpmyadmin/phpmyadmin
else
echo 'phpMyAdmin is already running'
fi
}
myblog() {
relies_on nginx-proxy
relies_on mysqlserver
sleep 5
del_stopped personal-blog
local state=$(podman inspect --format "{{.State.Running}}" personal-blog 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "blog server is not running, starting it for you."
podman container run -it -d \
--volume ${DEV_ZONE}/projects/open-sources/nurrony.info/ghost-content:/var/lib/ghost/content \
--env VIRTUAL_HOST=blog.${CONTAINER_FQDN:-nurrony.localhost} \
--env url=http://blog.${CONTAINER_FQDN:-nurrony.localhost} \
--env database__client=mysql \
--env database__connection__host=mysqlserver \
--env database__connection__user=root \
--env database__connection__password=nurrony \
--env database__connection__database=rons_blog \
--env DEV_DOMAIN=http://blog.${CONTAINER_FQDN:-nurrony.localhost} \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--name personal-blog \
ghost:2-alpine
else
echo 'blog is already running'
fi
}
mongoserver() {
local VERSION=${1:-7}
local PORT=${2:-27017}
local DBNAME=${3:-experiments}
local state=$(podman inspect --format "{{.State.Running}}" mongoserver$VERSION 2>/dev/null)
del_stopped mongoserver${VERSION}
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "mongoserver ${VERSION}.x.x is not running, starting it for you."
mkdir -p ${DEV_ZONE_CONFIG_PATH}/databases/mongodb/$VERSION
podman container run -dit \
--user $(id -u):$(id -g) \
--volume ${DEV_ZONE_CONFIG_PATH}/databases/mongodb/$VERSION:/data/db \
--env MONGO_INITDB_ROOT_USERNAME=root \
--env MONGO_INITDB_ROOT_PASSWORD=nurrony \
--env MONGO_INITDB_DATABASE=$DBNAME \
--publish $PORT:27017 \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--name mongoserver$VERSION \
mongo:${VERSION}
else
echo "mongoserver $VERSION.x.x is already running"
fi
}
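# e.g. mongoserver            # mongo 7 on port 27017
#      mongoserver 6 27018    # older version on an alternate port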
mongo() {
local RUNNING_MONGOSERVER_NAME=$(podman ps --filter "name=mongoserver" --format "{{.Names}}")
relies_on $RUNNING_MONGOSERVER_NAME
# mongo 6+ images ship mongosh instead of the legacy mongo shell
podman container exec -it $RUNNING_MONGOSERVER_NAME mongosh "$@"
}
# check https://github.com/GoogleCloudPlatform/click-to-deploy/blob/master/docker/rabbitmq/README.md
rabbitmq() {
del_stopped rabbitmq
local state=$(podman inspect --format "{{.State.Running}}" rabbitmq 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "rabbitmq broker is not running, starting it for you."
mkdir -p ${DEV_ZONE_CONFIG_PATH}/rabbitmq/{certs,data}
podman container run --detach -it \
--name rabbitmq \
--hostname rabbitmq \
--publish 5672:5672 \
--publish 15672:15672 \
--health-interval=15s \
--health-start-period=10s \
--health-cmd="rabbitmq-diagnostics -q ping" \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--env VIRTUAL_PORT=15672 \
--env RABBITMQ_DEFAULT_USER=admin \
--env RABBITMQ_DEFAULT_PASS=nurrony \
--env VIRTUAL_HOST=rabbitmq.${CONTAINER_FQDN:-nurrony.localhost} \
--volume ${DEV_ZONE_CONFIG_PATH}/rabbitmq/certs:/etc/rabbitmq/ssl \
--volume ${DEV_ZONE_CONFIG_PATH}/rabbitmq/data:/var/lib/rabbitmq \
--volume ${DEV_ZONE_CONFIG_PATH}/rabbitmq/rabbitmq.conf:/etc/rabbitmq/conf.d/10-defaults.conf:ro \
rabbitmq:4-management-alpine
else
echo "rabbitmq broker is already running"
fi
}
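# sanity-check the broker once it is up, e.g.
#   podman exec rabbitmq rabbitmq-diagnostics -q ping
# management ui: http://localhost:15672 (admin/nurrony)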
# TODO: finish this
kafka() {
local CONFLUENT_PLATFORM_VERSION=${1:-latest}
del_stopped kafka
local state=$(podman inspect --format "{{.State.Running}}" kafka 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "kafka broker ${VERSION} is not running, starting it for you."
mkdir -p ${DEV_ZONE_CONFIG_PATH}/kafka
podman container run --detach -it \
--name kafka \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--publish 9092:9092 \
--publish 9102:9102 \
--env KAFKA_NODE_ID=1 \
--env KAFKA_JMX_PORT=9102 \
--env KAFKA_JMX_HOSTNAME=localhost \
--env CLUSTER_ID=MkU3OEVBNTcwNTJENDM3Qk \
--env KAFKA_TRANSACTION_STATE_LOG_MIN_ISR=1 \
--env KAFKA_PROCESS_ROLES=broker,controller \
--env KAFKA_LOG_DIRS=/tmp/kraft-combined-logs \
--env KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS=0 \
--env KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 \
--env KAFKA_INTER_BROKER_LISTENER_NAME=PLAINTEXT \
--env KAFKA_CONTROLLER_LISTENER_NAMES=CONTROLLER \
--env KAFKA_CONTROLLER_QUORUM_VOTERS=1@kafka:29094 \
--env KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR=1 \
--env KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092 \
--env KAFKA_LISTENERS=PLAINTEXT://kafka:29092,CONTROLLER://kafka:29094,PLAINTEXT_HOST://0.0.0.0:9092 \
--env KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT \
--env VIRTUAL_HOST=kafka.${CONTAINER_FQDN:-nurrony.localhost} \
confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-latest}
else
echo "kafka broker $CONFLUENT_PLATFORM_VERSION is already running"
fi
}
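# smoke-test the broker from inside the container, e.g.
#   podman exec -it kafka kafka-topics --bootstrap-server localhost:9092 --list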
mailserver() {
del_stopped mailserver
relies_on nginx-proxy
sleep 2
local state=$(podman inspect --format "{{.State.Running}}" mailserver 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "mailserver is not running. starting is for you"
mkdir -p ${DEV_ZONE_CONFIG_PATH}/mailserver
podman container run -d \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--expose 8025 \
--name mailserver \
--env TZ=Asia/Dhaka \
--env VIRTUAL_PORT=8025 \
--env MP_SMTP_AUTH_ACCEPT_ANY=true \
--env MP_DATABASE=/data/mailpit.db \
--env MP_SMTP_AUTH_ALLOW_INSECURE=true \
--env VIRTUAL_HOST=mail.${CONTAINER_FQDN:-nurrony.localhost} \
--volume ${DEV_ZONE_CONFIG_PATH}/mailserver:/data \
axllent/mailpit:latest
else
echo "mailserver is already running"
fi
}
zipkin() {
local VERSION=${1:-3}
del_stopped zipkin${VERSION}
relies_on nginx-proxy
sleep 2
local state=$(podman inspect --format "{{.State.Running}}" zipkin${VERSION} 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "zipkin ${VERSION}.x.x server is not running, starting it for you."
podman container run --detach -it \
--name zipkin${VERSION} \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--publish 9411:9411 \
--env VIRTUAL_PORT=9411 \
--env VIRTUAL_HOST=zipkin.${CONTAINER_FQDN:-nurrony.localhost} \
openzipkin/zipkin:${VERSION}
else
echo "zipkin ${VERSION}.x.x is already running"
fi
}
# spins up a postgres server. takes version, port and db name as params. defaults to 18.x.x
pgserver() {
local VERSION=${1:-18}
local PORT=${2:-5432}
local DBNAME=${3:-experiments}
del_stopped pgserver${VERSION//./-}
local state=$(podman inspect --format "{{.State.Running}}" pgserver${VERSION//./-} 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
mkdir -p ${DEV_ZONE_CONFIG_PATH}/databases/postgres/${VERSION}
echo "postgres $VERSION.x.x server is not running, starting it for you."
podman container run -it -d \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--ulimit memlock=-1:-1 \
--memory-swappiness=0 \
--env POSTGRES_USER=postgres \
--env POSTGRES_DB=$DBNAME \
--env POSTGRES_PASSWORD=nurrony \
--publish $PORT:5432 \
--name pgserver${VERSION//./-} \
--volume ${DEV_ZONE_CONFIG_PATH}/databases/postgres/${VERSION}:/var/lib/postgresql/data \
postgres:$VERSION
else
echo "postgres $VERSION.x.x is already running"
fi
}
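# e.g. pgserver               # postgres 18 on port 5432
#      pgserver 16 5433 mydb  # pinned version, alternate port, custom db name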
psql() {
local RUNNING_PGSERVER_NAME=$(podman ps --filter "name=pgserver" --format "{{.Names}}")
relies_on $RUNNING_PGSERVER_NAME
podman container exec -it $RUNNING_PGSERVER_NAME psql "$@"
}
pga() {
del_stopped pga
relies_on nginx-proxy
sleep 2
local state=$(podman inspect --format "{{.State.Running}}" pga 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "pgAdmin4 is not running, starting it for you."
podman container run -dit \
--name pga \
--expose 80 \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--env PGADMIN_DISABLE_POSTFIX=yes \
--env PGADMIN_DEFAULT_PASSWORD=nurrony \
--env PGADMIN_DEFAULT_EMAIL=rony@nurrony.info \
--env VIRTUAL_HOST=pga.${CONTAINER_FQDN:-nurrony.localhost} \
dpage/pgadmin4
else
echo 'pgAdmin4 is already running'
fi
}
elasticsearch() {
# Add these extra environment variables if needed
# --env "bootstrap.memory_lock=true"
# --env "ES_JAVA_OPTS=-Xms512m -Xmx512m"
del_stopped elasticsearch
local state=$(podman inspect --format "{{.State.Running}}" elasticsearch 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "elasticsearch server is not running, starting it for you."
mkdir -p ${DEV_ZONE_CONFIG_PATH}/databases/elasticsearch/data
podman container run -it -d \
--volume "${DEV_ZONE_CONFIG_PATH}/databases/elasticsearch/data:/usr/share/elasticsearch/data" \
--publish 9200:9200 \
--publish 9300:9300 \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--name elasticsearch \
--ulimit memlock=-1:-1 \
--env VIRTUAL_PORT=9200 \
--env "http.cors.enabled=true" \
--env "ELASTIC_PASSWORD=nurrony" \
--env "discovery.type=single-node" \
--env "xpack.security.enabled=true" \
--env "http.cors.allow-origin=/.*/" \
--env "http.max_content_length=200mb" \
--env "node.name=local-elasticsearch" \
--env "cluster.name=local-elasticsearch" \
--env "http.cors.allow-headers: X-Requested-With,Content-Type,Content-Length,Authorization" \
docker.elastic.co/elasticsearch/elasticsearch:7.17.24 "$@"
else
echo 'elasticsearch server is already running'
fi
}
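# verify the node once it is up (security is enabled, so authenticate), e.g.
#   curl -u elastic:nurrony http://localhost:9200/_cluster/health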
elasticview() {
del_stopped elasticview
relies_on elasticsearch
relies_on nginx-proxy
sleep 2
local state=$(podman inspect --format "{{.State.Running}}" elasticview 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
podman container run -dit \
--expose 8080 \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--name elasticview \
--env VIRTUAL_PORT=8080 \
--env VIRTUAL_HOST=elasticsearch.${CONTAINER_FQDN:-nurrony.localhost} \
--volume "${DEV_ZONE_CONFIG_PATH}/databases/elasticsearch/default_clusters.json:/usr/share/nginx/html/api/default_clusters.json:ro" \
cars10/elasticvue
else
echo 'elasticsearch gui is already running'
fi
}
composer() {
podman container run --rm --interactive --tty --name composer \
--user $(id -u):$(id -g) \
--volume ${PWD}:/var/www/html \
nmrony/php:8.4-apache-dev composer "$@"
}
php7() {
podman container run --rm --interactive --tty \
--user $(id -u):$(id -g) \
--volume $PWD:/var/www/html \
--network ${DEV_CONTAINER_NETWORK_NAME} \
nmrony/php:7-apache-dev
}
start-lamp-stack() {
nginx-proxy && pma && mysqlserver && echo 'LAMP stack started successfully.'
}
stop-lamp-stack() {
podman rm -f nginx-proxy pma mysqlserver && echo 'LAMP stack stopped successfully.'
}
sysdig() {
del_stopped sysdig-container
podman container run -it \
--privileged \
--name sysdig-container \
--volume /run/podman/podman.sock:/run/podman/podman.sock \
--volume /dev:/host/dev \
--volume /proc:/host/proc:ro \
--volume /boot:/host/boot:ro \
--volume /lib/modules:/host/lib/modules:ro \
--volume /usr:/host/usr:ro \
sysdig/sysdig
}
dive() {
del_stopped dive
podman container run --rm -it --privileged \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--name dive \
--volume /run/podman/podman.sock:/run/podman/podman.sock:ro \
wagoodman/dive:latest "$@"
}
lazyvim() {
podman container run --interactive --tty --rm \
--workdir /root alpine:edge sh -uelic '
apk add git lazygit neovim ripgrep alpine-sdk --update
git clone https://github.com/LazyVim/starter ~/.config/nvim
cd ~/.config/nvim
nvim
'
}
keycloak() {
del_stopped keycloak
relies_on nginx-proxy
sleep 2
local state=$(podman inspect --format "{{.State.Running}}" pga 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "starting keycloak server for you"
podman container run -dit --expose 8080 \
--expose 9000 \
--expose 8443 \
--name keycloak \
--publish 8080:8080 \
--publish 9000:9000 \
--env VIRTUAL_PORT=8080 \
--env KC_HEALTH_ENABLED=true \
--env KC_BOOTSTRAP_ADMIN_USERNAME="root" \
--env KC_BOOTSTRAP_ADMIN_PASSWORD="nurrony" \
--env VIRTUAL_HOST=keycloak.${CONTAINER_FQDN:-nurrony.localhost} \
quay.io/keycloak/keycloak:latest start-dev
else
echo "keycloak server is already running"
fi
}
grafana() {
del_stopped grafana
relies_on nginx-proxy
sleep 2
local GRAFANA_PORT=${1:-3000}
local GRAFANA_IMG=${2:-latest}
local state=$(podman inspect --format "{{.State.Running}}" grafana 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
echo "grafana is not running. starting grafana server for you"
mkdir -p ${DEV_ZONE_CONFIG_PATH}/monitoring/grafana/data
podman container run -dit --name grafana \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--expose 3000 \
--publish ${GRAFANA_PORT}:3000 \
--env VIRTUAL_HOST=grafana.${CONTAINER_FQDN:-nurrony.localhost} \
--env GF_FEATURE_TOGGLES_ENABLE="traceqlEditor metricsSummary" \
--env GF_FEATURE_TOGGLES_ENABLE="accessControlOnCall" \
--env GF_INSTALL_PLUGINS="https://storage.googleapis.com/integration-artifacts/grafana-lokiexplore-app/grafana-lokiexplore-app-latest.zip;grafana-lokiexplore-app" \
--volume "${DEV_ZONE_CONFIG_PATH}/monitoring/grafana/data:/var/lib/grafana" \
grafana/grafana:${GRAFANA_IMG}
else
echo "grafana is already running"
fi
}
gitlab() {
del_stopped gitlab
local state=$(podman inspect --format "{{.State.Running}}" gitlab 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
relies_on nginx-proxy
echo "gitlab server is not running, starting it for you."
mkdir -p ${DEV_ZONE_CONFIG_PATH}/gitlab/storage/{runner,gitlab/{configs,data,logs}}
podman container run -dit \
--expose 80 \
--shm-size=256m \
--publish 8888:80 \
--publish 22022:22 \
--name gitlab \
--hostname git.${CONTAINER_FQDN:-nurrony.localhost} \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--volume ${DEV_ZONE_CONFIG_PATH}/gitlab/gitlab.rb:/gitlab.rb \
--volume ${DEV_ZONE_CONFIG_PATH}/gitlab/storage/gitlab/configs:/etc/gitlab \
--volume ${DEV_ZONE_CONFIG_PATH}/gitlab/storage/gitlab/data:/var/opt/gitlab \
--volume ${DEV_ZONE_CONFIG_PATH}/gitlab/storage/gitlab/logs:/var/log/gitlab \
--env VIRTUAL_PORT=80 \
--env GITLAB_OMNIBUS_CONFIG="from_file('/gitlab.rb')" \
--env VIRTUAL_HOST=git.${CONTAINER_FQDN:-nurrony.localhost},registry-git.${CONTAINER_FQDN:-nurrony.localhost} \
gitlab/gitlab-ce:latest
else
echo "gitlab server is already running"
fi
}
# SonarQube NurRony@123456
sonarqube() {
del_stopped sonarqube
local state=$(podman inspect --format "{{.State.Running}}" sonarqube 2>/dev/null)
if [[ "$state" == "false" ]] || [[ "$state" == "" ]]; then
relies_on nginx-proxy
relies_on pgserver
local RUNNING_PGSERVER_NAME=$(podman ps --filter "name=pgserver" --format "{{.Names}}")
echo "sonarqube server is not running, starting it for you."
mkdir -p ${DEV_ZONE_CONFIG_PATH}/sonarqube/{configs,storage/{extensions,data,logs}}
podman container run -dit \
--publish 9000:9000 \
--env VIRTUAL_PORT=9000 \
--env SONAR_JDBC_PASSWORD=nurrony \
--env SONAR_JDBC_USERNAME=postgres \
--env SONAR_ES_BOOTSTRAP_CHECKS_DISABLE=true \
--name sonarqube \
--hostname sonarqube.${CONTAINER_FQDN:-nurrony.localhost} \
--env VIRTUAL_HOST=sonarqube.${CONTAINER_FQDN:-nurrony.localhost} \
--env SONAR_JDBC_URL=jdbc:postgresql://${RUNNING_PGSERVER_NAME}:5432/sonarqube \
--volume ${DEV_ZONE_CONFIG_PATH}/sonarqube/storage/data:/opt/sonarqube/data \
--volume ${DEV_ZONE_CONFIG_PATH}/sonarqube/storage/logs:/opt/sonarqube/logs \
--volume ${DEV_ZONE_CONFIG_PATH}/sonarqube/storage/extensions:/opt/sonarqube/extensions \
--volume ${DEV_ZONE_CONFIG_PATH}/sonarqube/configs/99-sonarqube.conf:/etc/sysctl.d/99-sonarqube.conf:ro \
--network ${DEV_CONTAINER_NETWORK_NAME} \
sonarqube:community
else
echo "sonarqube server is already running"
fi
}
jenkins() {
del_stopped jenkins
del_stopped jenkins-dind
local state=$(podman inspect --format "{{.State.Running}}" jenkins 2>/dev/null)
local dind_state=$(podman inspect --format "{{.State.Running}}" jenkins-dind 2>/dev/null)
mkdir -p ${DEV_ZONE_CONFIG_PATH}/jenkins/{certs,data}
if [ "$state" == "false" ] || [ "$state" == "" ]; then
local IMAGE_NAME="nmrony/jenkins:lts-jdk21-docker"
relies_on nginx-proxy
# Check if the image exists locally
if ! podman image inspect "$IMAGE_NAME" >/dev/null 2>&1; then
echo "${IMAGE_NAME} not found locally. Building it..."
podman image build -t "$IMAGE_NAME" - <<EOF
FROM jenkins/jenkins:lts-jdk21
USER root
RUN apt-get -qq --yes update && apt-get install --yes lsb-release && curl -fsSLo /usr/share/keyrings/docker-archive-keyring.asc https://download.docker.com/linux/debian/gpg
RUN echo "deb [arch=\$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.asc] https://download.docker.com/linux/debian \$(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list
RUN apt-get update -qq --yes && apt-get install --yes docker-ce-cli && apt clean --yes && apt autoremove --yes && rm -rf /var/cache/apt/archives /var/lib/apt/lists/* && rm -rf /var/lib/{apt,dpkg,cache,log}/
USER jenkins
HEALTHCHECK --interval=30s --timeout=5s CMD curl -f http://localhost:8080/login || exit 1
EOF
fi
if [ "$dind_state" == "false" ] || [ "$dind_state" == "" ]; then
echo "starting DinD container for jenkins..."
podman container run -dit --name jenkins-dind --privileged \
--expose 2376 \
--health-retries=5 \
--health-timeout=5s \
--health-interval=5s \
--health-cmd="docker info" \
--network-alias docker \
--env DOCKER_TLS_CERTDIR=/certs \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--volume "${DEV_ZONE_CONFIG_PATH}/jenkins/certs:/certs/client" \
--volume "${DEV_ZONE_CONFIG_PATH}/jenkins/data:/var/jenkins_home" \
docker:dind --storage-driver overlay2
sleep 2s
fi
echo "jenkins server is not running. starting it for you"
podman container run -dit \
--publish 9443:8443 \
--publish 8080:8080 \
--publish 50000:50000 \
--env VIRTUAL_PORT=8080 \
--env DOCKER_TLS_VERIFY=1 \
--env DOCKER_CERT_PATH=/certs/client \
--network ${DEV_CONTAINER_NETWORK_NAME} \
--env DOCKER_HOST=tcp://docker:2376 \
--name jenkins \
--hostname jenkins.${CONTAINER_FQDN:-nurrony.localhost} \
--env VIRTUAL_HOST=jenkins.${CONTAINER_FQDN:-nurrony.localhost} \
--volume "${DEV_ZONE_CONFIG_PATH}/jenkins/certs:/certs/client:ro" \
--volume "${DEV_ZONE_CONFIG_PATH}/jenkins/data:/var/jenkins_home" \
${IMAGE_NAME}
else
echo "jenkins server is already running"
fi
}
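# first-run unlock: read the initial admin password from the jenkins home volume, e.g.
#   podman exec jenkins cat /var/jenkins_home/secrets/initialAdminPassword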
###
### Awesome sauce by @jpetazzo
###
command_not_found_handle() {
# Check if there is a container image with that name
if ! podman inspect --format '{{ .Author }}' "$1" >&/dev/null; then
echo "$0: $1: command not found"
return
fi
# Check that it's really the name of the image, not a prefix
if podman inspect --format '{{ .Id }}' "$1" | grep -q "^$1"; then
echo "$0: $1: command not found"
return
fi
# otherwise run the image as an interactive container (per @jpetazzo's original)
podman container run -it "$@"
}
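# with this handler sourced, typing the name of a locally pulled image runs it, e.g.
#   $ alpine   ->   podman container run -it alpine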