CKAD training notes.
# enable ip forwarding
cat <<EOF | sudo tee /etc/sysctl.d/10-ipv4-forward.conf
net.ipv4.ip_forward = 1
EOF
sudo sysctl --system
echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward
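To verify the setting took effect (both commands should report 1):

```bash
sysctl net.ipv4.ip_forward
cat /proc/sys/net/ipv4/ip_forward
```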
# disable memory swap
sudo swapoff -a
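`swapoff -a` only lasts until reboot; to make it permanent, comment out the swap entry in /etc/fstab (assuming swap is configured there):

```bash
sudo sed -ri '/\sswap\s/ s/^/#/' /etc/fstab
```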
# add tools
sudo apt-get clean
sudo apt-get update
sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release -y
# add containerd repository
sudo install -m 0755 -d /etc/apt/keyrings
sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
sudo chmod a+r /etc/apt/keyrings/docker.asc
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
# install containerd
sudo apt-get update && sudo apt-get install containerd.io -y
sudo mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml
grep SystemdCgroup /etc/containerd/config.toml
sudo systemctl restart containerd
sudo systemctl is-active containerd
# add kubernetes repository
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.34/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
# install kubernetes tools
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
# initialize the control plane (controller node only)
sudo kubeadm init
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
source <(kubectl completion bash)
kubectl get node
kubectl get pods -n kube-system
# generate the join command (controller node)
kubeadm token create --print-join-command
# join the cluster (worker nodes)
sudo kubeadm join $CONTROLLER_IP:6443 --token $JOIN_TOKEN --discovery-token-ca-cert-hash $CA_CERT_HASH
# install Calico CNI
curl https://raw.githubusercontent.com/projectcalico/calico/v3.28.2/manifests/calico.yaml -O
kubectl apply -f calico.yaml
kubectl get pods -n kube-system | grep calico
mkdir gohello
cd gohello
cat <<EOF >go.mod
module gohello
go 1.23.2
EOF
cat <<EOF >main.go
package main
import (
"fmt"
"net/http"
)
func main() {
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello, you've requested: %s\n", r.URL.Path)
})
http.ListenAndServe(":8080", nil)
}
EOF
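The server can be sanity-checked locally before containerizing (assumes Go is installed on the host):

```bash
go run main.go &            # start the server in the background
sleep 1
curl localhost:8080/test    # expect: Hello, you've requested: /test
kill %1                     # stop the background server
```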
cat <<EOF >Dockerfile
FROM golang:1.23.2 AS builder
WORKDIR /app
COPY . .
RUN go build -o gohello .
ENTRYPOINT [ "./gohello" ]
EOF
docker login
docker build -t $DOCKERHUB_USERNAME/gohello:1.0.0 .
docker push $DOCKERHUB_USERNAME/gohello:1.0.0
cat <<EOF >Dockerfile
FROM golang:1.23.2 AS builder
WORKDIR /app
COPY . .
RUN go build -o gohello .
FROM gcr.io/distroless/base-debian12
WORKDIR /app
COPY --from=builder /app/gohello .
ENTRYPOINT [ "./gohello" ]
EOF
docker login
docker build -t $DOCKERHUB_USERNAME/gohello:1.1.0 .
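Comparing the two tags shows the payoff of the multi-stage build; the distroless-based 1.1.0 image should be dramatically smaller:

```bash
docker images | grep gohello
```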
docker push $DOCKERHUB_USERNAME/gohello:1.1.0
kubectl create deployment nginx --image=public.ecr.aws/docker/library/nginx:1.27.2 --output=yaml --dry-run=client >nginx-deployment.yaml
cat nginx-deployment.yaml
kubectl apply -f nginx-deployment.yaml
kubectl get deployment
kubectl get deployment nginx
kubectl get deployment nginx -o yaml
kubectl describe deployment nginx
kubectl get pods
kubectl port-forward deployment/nginx --address=0.0.0.0 8080:80
curl -v localhost:8080 # or access the public IP via browser
sed -i 's/1.27.2/1.27.1/g' nginx-deployment.yaml
cat nginx-deployment.yaml
kubectl apply -f nginx-deployment.yaml
kubectl get deployment nginx
kubectl get pods
kubectl port-forward deployment/nginx --address=0.0.0.0 8080:80
curl -v localhost:8080 # or access the public IP via browser
kubectl scale deployment nginx --replicas=2
kubectl get deployment
kubectl get pods
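Scaling can also be automated with a HorizontalPodAutoscaler; a minimal sketch (requires metrics-server, installed later in these notes, plus CPU requests on the Deployment for utilization to be computed):

```bash
kubectl autoscale deployment nginx --cpu-percent=60 --min=1 --max=5
kubectl get hpa
kubectl delete hpa nginx
```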
kubectl edit deployment nginx # set replicas back to 1
- Create a Deployment named `httpd` with image `public.ecr.aws/docker/library/httpd:2.46.2`. What is the result?
Ensures all nodes run a copy of a Pod. Usually used for operational tools or add-ons.
cat <<EOF >nginx-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nginx
labels:
app: nginx
spec:
selector:
matchLabels:
name: nginx
template:
metadata:
labels:
name: nginx
spec:
containers:
- name: nginx
image: public.ecr.aws/docker/library/nginx:1.27.2
EOF
kubectl apply -f nginx-daemonset.yaml
- Create a DaemonSet named `kuard` that runs the `guenterh/kuard-amd64:blue` image.
Like a Deployment, but used to deploy stateful applications; maintains a sticky identity for each Pod.
cat <<EOF >mysql-statefulset.yaml
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mysql
labels:
app: mysql
spec:
serviceName: mysql
replicas: 1
selector:
matchLabels:
app: mysql
template:
metadata:
labels:
app: mysql
spec:
containers:
- name: mysql
image: public.ecr.aws/docker/library/mysql:8.4.2
ports:
- containerPort: 3306
name: mysql
env:
- name: MYSQL_ROOT_PASSWORD
value: "P@ssw0rd"
- name: MYSQL_DATABASE
value: "mydb"
- name: MYSQL_USER
value: "user"
- name: MYSQL_PASSWORD
value: "P@ssw0rd"
volumeMounts:
- name: data
mountPath: /var/lib/mysql
volumes:
- name: data
hostPath:
path: /data/mysql
type: DirectoryOrCreate
EOF
kubectl apply -f mysql-statefulset.yaml
kubectl get sts -o wide
kubectl exec -ti mysql-0 -- bash
mysql -h 127.0.0.1 -u root -p mydb
select version();
create table users (id int primary key, email text);
show tables;
exit
sed -i 's/8.4.2/8.4.3/g' mysql-statefulset.yaml
kubectl apply -f mysql-statefulset.yaml
kubectl exec -ti mysql-0 -- bash
mysql -h 127.0.0.1 -u root -p mydb
select version();
show tables;
- Try to create a StatefulSet for postgres.
- Check here for information related to the container image.
cat <<EOF > backup-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup
spec:
template:
spec:
containers:
- name: backup
image: public.ecr.aws/docker/library/busybox:1.37.0
command: ["/bin/sh", "-c", "cp /etc/hostname /mnt/hostname-$(date +%s).txt"]
volumeMounts:
- name: data
mountPath: /mnt
restartPolicy: Never
volumes:
- name: data
hostPath:
path: /data/backup
type: DirectoryOrCreate
backoffLimit: 3
EOF
kubectl apply -f backup-job.yaml
kubectl get pods -o wide
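Job completion can be confirmed before checking the worker's filesystem:

```bash
kubectl get jobs              # COMPLETIONS should show 1/1
kubectl describe job backup
```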
# On worker1 or worker2
ls /data/backup
cat <<EOF > backup-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
creationTimestamp: null
name: backup
spec:
jobTemplate:
metadata:
creationTimestamp: null
name: backup
spec:
template:
metadata:
creationTimestamp: null
spec:
restartPolicy: Never
containers:
- image: busybox:1.37.0
name: backup
command:
- /bin/sh
- -c
- cp /etc/hostname /mnt/hostname
volumeMounts:
- name: data
mountPath: /mnt
volumes:
- name: data
hostPath:
              path: /data/backup
type: DirectoryOrCreate
schedule: '* * * * *'
EOF
kubectl apply -f backup-cronjob.yaml
kubectl get pods -o wide
# On worker1 or worker2
ls /data/backup
kubectl delete cj backup
- Create a CronJob named `backup-kubenesia-com` to back up https://kubenesia.com every 5 minutes. The file should be visible on the worker at `/data/kubenesia-backup/index.html`.
- Simulate a scheduled database backup:
  - Create a `postgres` StatefulSet with database `test_db` and a single table named `users`.
  - Create a Service named `postgres`.
  - Create a CronJob named `postgres-backup` that runs `pg_dump -h postgres -U postgres test_db -f /mnt/test_db.sql`. Use the environment variable `PGPASSWORD`.
  - The backup file should be visible on a worker node at `/data/postgres-backup/test_db.sql`.
  - The backup should be updated every hour.
cat <<EOF >myapp-with-sidecar.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: myapp
labels:
app: myapp
spec:
replicas: 1
selector:
matchLabels:
app: myapp
template:
metadata:
labels:
app: myapp
spec:
containers:
- name: myapp
image: alpine:latest
command: ['sh', '-c', 'while true; do echo "logging" >> /opt/logs.txt; sleep 1; done']
volumeMounts:
- name: data
mountPath: /opt
- name: logshipper
image: alpine:latest
command: ['sh', '-c', 'tail -F /opt/logs.txt']
volumeMounts:
- name: data
mountPath: /opt
volumes:
- name: data
emptyDir: {}
EOF
kubectl apply -f myapp-with-sidecar.yaml
kubectl get pods
kubectl logs deployment/myapp -c logshipper
cat <<EOF >nginx-with-init-container-pod.yaml
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-init-container
spec:
containers:
- name: nginx
image: nginx
volumeMounts:
- name: html
mountPath: /usr/share/nginx/html
initContainers:
- name: install
image: busybox:1.37
command:
- wget
- -O
- /usr/share/nginx/html/index.html
- https://kubenesia.com
volumeMounts:
- name: html
mountPath: /usr/share/nginx/html
volumes:
- name: html
emptyDir: {}
EOF
kubectl apply -f nginx-with-init-container-pod.yaml
kubectl get pods nginx-with-init-container
kubectl port-forward nginx-with-init-container 8080:80
curl -v localhost:8080
# install NFS server (controller node)
sudo apt-get install --yes nfs-kernel-server
cat <<EOF | sudo tee /etc/exports
/srv/nfs4 10.0.0.0/8(rw,no_subtree_check,no_root_squash)
EOF
sudo mkdir /srv/nfs4
sudo chmod -R 777 /srv/nfs4
sudo systemctl restart nfs-kernel-server
# install NFS client (worker nodes)
sudo apt-get install --yes nfs-common
export NFS_SERVER=$(ip -4 a | grep global | head -n 1 | awk '{print $2}' | cut -d '/' -f 1)
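The export can be verified from any machine with nfs-common installed before wiring it into a PV:

```bash
showmount -e $NFS_SERVER
```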
cat <<EOF >nginx-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: nginx
spec:
storageClassName: "nfs"
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
server: $NFS_SERVER
path: /srv/nfs4
EOF
kubectl apply -f nginx-pv.yaml
kubectl get pv
kubectl describe pv nginx
cat <<EOF >nginx-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nginx
spec:
storageClassName: "nfs"
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
EOF
kubectl apply -f nginx-pvc.yaml
kubectl get pvc
kubectl describe pvc nginx
cat <<EOF >nginx-pod.yaml
apiVersion: v1
kind: Pod
metadata:
name: nginx
spec:
containers:
- name: nginx
image: nginx:1.27.2
volumeMounts:
- name: html
mountPath: /usr/share/nginx/html
volumes:
- name: html
persistentVolumeClaim:
claimName: nginx
EOF
kubectl apply -f nginx-pod.yaml
kubectl describe pods nginx
kubectl get pods nginx -o wide
kubectl port-forward nginx 8080:80
echo "Old landing page" | sudo tee /srv/nfs4/index.html
curl localhost:8080
echo "New landing page" | sudo tee /srv/nfs4/index.html
curl localhost:8080
kubectl delete pods nginx
kubectl delete pvc nginx
kubectl delete pv nginx
- Create a StatefulSet named `postgres` that uses a PV and a PVC with the same name.
- The PV should be backed by NFS.
- Create a new database named `test_db` and then remove the pod.
- Check that the pod is recreated and `test_db` still exists.
cat <<EOF >nginx-service.yaml
apiVersion: v1
kind: Service
metadata:
name: nginx
spec:
type: ClusterIP
selector:
app: nginx
ports:
- port: 80
targetPort: 80
EOF
kubectl apply -f nginx-service.yaml
cat <<EOF >nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.27.1
EOF
kubectl apply -f nginx-deployment.yaml
kubectl run --rm -ti --image=nicolaka/netshoot -- bash
curl -Is nginx | grep Server
exit
cat <<EOF >nginx-canary-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-canary
spec:
selector:
matchLabels:
app: nginx
canary: "true"
template:
metadata:
labels:
app: nginx
canary: "true"
spec:
containers:
- name: nginx
image: nginx:1.27.2
EOF
kubectl apply -f nginx-canary-deployment.yaml
kubectl run --rm -ti --image=nicolaka/netshoot -- bash
for i in {1..10}; do curl -Is nginx | grep Server; done
exit
kubectl delete deployment nginx
kubectl delete deployment nginx-canary
cat <<EOF >echo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: echo
labels:
app: echo
spec:
replicas: 5
selector:
matchLabels:
app: echo
template:
metadata:
labels:
app: echo
spec:
containers:
- name: echo
image: mendhak/http-https-echo:31
EOF
kubectl apply -f echo.yaml
kubectl get pods
kubectl set image deployment echo echo=mendhak/http-https-echo:32
kubectl get pods
cat <<EOF >echo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: echo
name: echo
spec:
replicas: 4
selector:
matchLabels:
app: echo
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 50%
template:
metadata:
labels:
app: echo
spec:
containers:
- image: mendhak/http-https-echo:31
name: echo
EOF
kubectl apply -f echo.yaml
kubectl get pods
cat <<EOF >echo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: echo
name: echo
spec:
replicas: 4
selector:
matchLabels:
app: echo
strategy:
type: Recreate
template:
metadata:
labels:
app: echo
spec:
containers:
- image: mendhak/http-https-echo:32
name: echo
EOF
kubectl apply -f echo.yaml
kubectl get pods
kubectl set image deployment echo echo=mendhak/http-https-echo:31
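Rollouts can be watched, inspected, and reverted with `kubectl rollout`:

```bash
kubectl rollout status deployment echo
kubectl rollout history deployment echo
kubectl rollout undo deployment echo    # roll back to the previous revision
```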
kubectl get pods
# install Helm and helper tools
sudo apt-get install git yq -y
curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
which helm
helm version
helm create chart-test
cd chart-test
ls templates
cat values.yaml
cat <<EOF >/tmp/custom-values.yaml
replicaCount: 2
image:
repository: nginx
tag: 1.27.2
EOF
helm install nginx . -f /tmp/custom-values.yaml
helm ls
kubectl get pods
kubectl get svc
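A release can be modified in place with `helm upgrade`; a quick sketch reusing the same chart directory and values file:

```bash
helm upgrade nginx . -f /tmp/custom-values.yaml --set replicaCount=3
helm history nginx
```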
helm uninstall nginx
cat <<EOF >/tmp/blog-values.yaml
wordpressUsername: robot
wordpressPassword: P@ssw0rd
wordpressEmail: robot@gmail.com
wordpressFirstName: Robot
wordpressLastName: Robot
wordpressBlogName: Blog of Robot
persistence:
enabled: false
mariadb:
primary:
persistence:
enabled: false
EOF
helm install blog oci://registry-1.docker.io/bitnamicharts/wordpress -f /tmp/blog-values.yaml
kubectl get pods
export NODE_PORT=$(kubectl get svc blog-wordpress -o yaml | yq .spec.ports[0].nodePort)
echo $NODE_PORT
curl localhost:$NODE_PORT
helm uninstall blog
mkdir ~/nginx-kustomize
cd ~/nginx-kustomize
mkdir base development production
cat <<EOF >base/service.yaml
apiVersion: v1
kind: Service
metadata:
name: nginx
spec:
type: ClusterIP
selector:
app: nginx
ports:
- port: 80
targetPort: 80
EOF
cat <<EOF >base/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.27.1
EOF
cat <<EOF >base/kustomization.yaml
resources:
- service.yaml
- deployment.yaml
EOF
cat <<EOF >development/kustomization.yaml
resources:
- ../base
patches:
- target:
group: apps
version: v1
kind: Deployment
name: nginx
patch: |-
- op: replace
path: /spec/replicas
value: 2
EOF
cat <<EOF >production/kustomization.yaml
resources:
- ../base
patches:
- target:
group: apps
version: v1
kind: Deployment
name: nginx
patch: |-
- op: replace
path: /spec/replicas
value: 5
EOF
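Overlays can be applied directly with kubectl's built-in Kustomize support; the `kubectl kustomize` commands below only render the manifests for review:

```bash
kubectl apply -k development
# kubectl apply -k production
```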
kubectl kustomize development
kubectl kustomize production
kubectl api-versions
cat <<EOF >nginx-old-deployment.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-old
spec:
replicas: 1
selector:
matchLabels:
app: nginx-old
template:
metadata:
labels:
app: nginx-old
spec:
containers:
- name: nginx
image: nginx:1.27.2
EOF
vim nginx-old-deployment.yaml # fix the apiVersion
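Deployments moved to `apps/v1` (the `extensions/v1beta1` version was removed in Kubernetes 1.16), so the fix can also be scripted:

```bash
sed -i 's|extensions/v1beta1|apps/v1|' nginx-old-deployment.yaml
```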
kubectl apply -f nginx-old-deployment.yaml
Used to protect slow-starting containers: liveness and readiness checks are held off until the startup probe succeeds.
cat <<EOF >kuard.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: kuard
name: kuard
spec:
replicas: 1
selector:
matchLabels:
app: kuard
template:
metadata:
labels:
app: kuard
spec:
containers:
- image: guenterh/kuard-amd64:blue
name: kuard-amd64
startupProbe:
httpGet:
path: /ready
port: 6000
EOF
kubectl apply -f kuard.yaml
kubectl get pods
kubectl describe pods -l app=kuard
sed -i 's/6000/8080/g' kuard.yaml
kubectl apply -f kuard.yaml
kubectl describe pods -l app=kuard
Used to check the health of the Pod periodically. The Pod is not restarted on failure, only removed from Service endpoints.
cat <<EOF >kuard.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: kuard
name: kuard
spec:
replicas: 1
selector:
matchLabels:
app: kuard
template:
metadata:
labels:
app: kuard
spec:
containers:
- image: guenterh/kuard-amd64:blue
name: kuard-amd64
startupProbe:
httpGet:
path: /ready
port: 8080
readinessProbe:
httpGet:
path: /ready
port: 8080
EOF
kubectl apply -f kuard.yaml
kubectl get pods
kubectl describe pods -l app=kuard
Used to check the health of the container periodically. The container is restarted on failure.
cat <<EOF >kuard.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: kuard
name: kuard
spec:
replicas: 1
selector:
matchLabels:
app: kuard
template:
metadata:
labels:
app: kuard
spec:
containers:
- image: guenterh/kuard-amd64:blue
name: kuard-amd64
startupProbe:
httpGet:
path: /ready
port: 8080
livenessProbe:
httpGet:
path: /healthy
port: 8080
EOF
kubectl apply -f kuard.yaml
kubectl get pods
kubectl describe pods -l app=kuard
- Try to create a Deployment with image `guenterh/kuard-amd64:blue` with 5 replicas and access port 8080 of the container.
- Try to create a Deployment with the following spec:
  - Name: `web`
  - Two images: `nginx:1.27.2` and `guenterh/kuard-amd64:blue`
  - Replicas: 10
  - Startup probe for nginx
  - Liveness probe for kuard
kubectl logs deployment/kuard
kubectl logs -f deployment/kuard
- Used to control the resource usage (CPU and memory) of a Pod.
- Best practice: avoid CPU limits to prevent throttling (https://home.robusta.dev/blog/stop-using-cpu-limits).
- Always define resource requests:
  - Easier capacity planning
  - Fewer surprises
curl -fsSLo metrics-server.yaml https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
vim metrics-server.yaml
# add --kubelet-insecure-tls under args
kubectl apply -f metrics-server.yaml
kubectl -n kube-system get pods
kubectl top nodes
kubectl top pods
cat <<EOF >pod-requests-limits.yaml
apiVersion: v1
kind: Pod
metadata:
name: echo-requests-limits
spec:
containers:
- name: echo
image: mendhak/http-https-echo:31
resources:
requests:
memory: 128Mi
cpu: 250m
limits:
memory: 256Mi
cpu: 500m
EOF
kubectl apply -f pod-requests-limits.yaml
kubectl get pods
kubectl describe pods
kubectl describe node worker1
kubectl describe node worker2
- If resource requests can't be fulfilled by any node, the Pod stays stuck in the Pending state.
cat <<EOF >pod-requests-limits.yaml
apiVersion: v1
kind: Pod
metadata:
name: echo-requests-limits
spec:
containers:
- name: echo
image: mendhak/http-https-echo:31
resources:
requests:
memory: 128Gi
cpu: 250m
limits:
memory: 256Gi
cpu: 500m
EOF
kubectl apply -f pod-requests-limits.yaml
kubectl get pods
kubectl describe pods
kubectl describe node worker1
kubectl describe node worker2
kubectl delete pod echo-requests-limits
Used to control resources per namespace instead of per Pod.
kubectl create ns payment
cat <<EOF >resourcequota-payment.yaml
apiVersion: v1
kind: ResourceQuota
metadata:
name: payment
namespace: payment
spec:
hard:
requests.cpu: "2"
EOF
kubectl apply -f resourcequota-payment.yaml
kubectl get resourcequota -n payment
cat <<EOF >nginx-payment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
namespace: payment
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.27.2
resources:
requests:
cpu: 1000m
EOF
kubectl apply -f nginx-payment.yaml
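With a 2-CPU quota and three replicas requesting 1 CPU each, only two Pods should be admitted; the quota usage and ReplicaSet status make this visible:

```bash
kubectl describe resourcequota payment -n payment   # Used requests.cpu should be 2
kubectl get rs -n payment                           # DESIRED 3, READY 2
```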
kubectl get pods -n payment
cat <<EOF >echo-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: echo
data:
MODE: development
COLOR: blue
SERVICE: echo
MULTILINE: |
Line
Line
Line
EOF
kubectl apply -f echo-configmap.yaml
kubectl get cm echo
kubectl get cm echo -o yaml
cat <<EOF >echo-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: echo
name: echo
spec:
replicas: 1
selector:
matchLabels:
app: echo
template:
metadata:
labels:
app: echo
spec:
containers:
- image: mendhak/http-https-echo:31
name: echo
envFrom:
- configMapRef:
name: echo
EOF
kubectl apply -f echo-deployment.yaml
kubectl exec -ti deployment/echo -- sh
printenv | egrep 'COLOR=|MODE=|SERVICE='
exit
cat <<EOF >echo-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: echo
name: echo
spec:
replicas: 1
selector:
matchLabels:
app: echo
template:
metadata:
labels:
app: echo
spec:
containers:
- image: mendhak/http-https-echo:31
name: echo
volumeMounts:
- mountPath: /data/config
name: config
volumes:
- name: config
configMap:
name: echo
EOF
kubectl apply -f echo-deployment.yaml
kubectl exec -ti deployment/echo -- sh
ls /data/config
cat /data/config/MULTILINE
exit
kubectl create secret generic echo --from-literal=MODE=development --from-literal=COLOR=blue
kubectl get secret
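Secret data is only base64-encoded, not encrypted, so values are trivially recoverable:

```bash
kubectl get secret echo -o jsonpath='{.data.MODE}' | base64 -d
```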
kubectl get secret echo -o yaml
kubectl apply -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: echo
name: echo
spec:
replicas: 1
selector:
matchLabels:
app: echo
template:
metadata:
labels:
app: echo
spec:
containers:
- image: mendhak/http-https-echo:31
name: echo
envFrom:
- secretRef:
name: echo
EOF
kubectl exec -ti deployment/echo -- sh
printenv | egrep 'COLOR=|MODE=|SERVICE='
exit
kubectl apply -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: echo
name: echo
spec:
replicas: 1
selector:
matchLabels:
app: echo
template:
metadata:
labels:
app: echo
spec:
containers:
- image: mendhak/http-https-echo:31
name: echo
volumeMounts:
- mountPath: /data/config
name: config
volumes:
- name: config
        secret:
          secretName: echo
EOF
kubectl exec -ti deployment/echo -- sh
ls /data/config
cat /data/config/MODE
exit
- Create a `postgres` StatefulSet as before, but use a `Secret` to configure `POSTGRES_PASSWORD`.
- Check with `kubectl exec -ti postgres-0 -- psql -h 127.0.0.1 -U postgres`.
kubectl create serviceaccount pod-reader
kubectl get serviceaccount pod-reader
cat <<EOF >pod-reader-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: pod-reader
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: pod-reader
subjects:
- kind: ServiceAccount
  name: pod-reader
  namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: pod-reader
EOF
kubectl apply -f pod-reader-rbac.yaml
cat <<EOF >pod-reader-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: pod-reader
labels:
app: pod-reader
spec:
replicas: 1
selector:
matchLabels:
app: pod-reader
template:
metadata:
labels:
app: pod-reader
spec:
serviceAccountName: pod-reader
containers:
- name: pod-reader
image: bitnami/kubectl:1.33.4
command:
- sleep
- "3600"
EOF
kubectl apply -f pod-reader-deployment.yaml
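Expected permissions can be checked up front with impersonation (assumes the ServiceAccount lives in the `default` namespace):

```bash
kubectl auth can-i list pods --as=system:serviceaccount:default:pod-reader     # yes
kubectl auth can-i delete pods --as=system:serviceaccount:default:pod-reader   # no
```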
kubectl exec -ti deployment/pod-reader -- sh
kubectl get pods
kubectl delete pods --all # fails: the ClusterRole only allows get, watch, list
exit
cat <<EOF >node-hello-pod.yaml
apiVersion: v1
kind: Pod
metadata:
name: node-hello
spec:
containers:
- name: node-hello
image: gcr.io/google-samples/node-hello:1.0
EOF
kubectl apply -f node-hello-pod.yaml
kubectl exec -ti node-hello -- bash
whoami
touch test.txt
ls -ls test.txt
exit
cat <<EOF >node-hello-pod.yaml
apiVersion: v1
kind: Pod
metadata:
name: node-hello
spec:
containers:
- name: node-hello
image: gcr.io/google-samples/node-hello:1.0
securityContext:
runAsUser: 1000
allowPrivilegeEscalation: false
EOF
kubectl apply -f node-hello-pod.yaml --force
kubectl exec -ti node-hello -- bash
whoami
touch test.txt
exit
kubectl delete -f node-hello-pod.yaml
- Containers inside the same Pod can communicate via `localhost`.
- Each Pod in a cluster gets its own unique IP address.
- All Pods can communicate with all other Pods by default.
- A Service provides a stable IP address or hostname instead of manually using Pod IPs.
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubeapp
labels:
app: kubeapp
spec:
replicas: 2
selector:
matchLabels:
app: kubeapp
template:
metadata:
labels:
app: kubeapp
spec:
containers:
- name: kubeapp
image: kubenesia/kubeapp:1.2.0
ports:
- name: http
containerPort: 8000
EOF
cat <<EOF >kubeapp-service.yaml
apiVersion: v1
kind: Service
metadata:
name: kubeapp
labels:
app: kubeapp
spec:
type: ClusterIP
selector:
app: kubeapp
ports:
- port: 80
protocol: TCP
targetPort: 8000
EOF
kubectl apply -f kubeapp-deployment.yaml -f kubeapp-service.yaml
kubectl get svc
kubectl describe svc kubeapp
kubectl get ep
kubectl describe ep
kubectl get pods -o wide
kubectl get pods -l app=kubeapp
kubectl run test -ti --rm --image=kubenesia/kubebox -- sh
curl kubeapp
nslookup kubeapp
exit
cat <<EOF >gogolele-service.yaml
apiVersion: v1
kind: Service
metadata:
name: gogolele
spec:
type: ExternalName
externalName: google.com
EOF
kubectl apply -f gogolele-service.yaml
kubectl get svc
kubectl describe svc gogolele
kubectl run test -it --rm --image=kubenesia/kubebox -- sh
curl gogolele
nslookup gogolele
exit
cat <<EOF >kubeapp-service.yaml
apiVersion: v1
kind: Service
metadata:
name: kubeapp
spec:
type: NodePort
selector:
app: kubeapp
ports:
- port: 80
protocol: TCP
targetPort: 8000
nodePort: 31080
EOF
kubectl apply -f kubeapp-service.yaml
kubectl get svc
kubectl describe svc kubeapp
curl 127.0.0.1:31080
- Used to bypass the cluster-wide virtual IP; the name resolves directly to Pod IPs.
cat <<EOF >kubeapp-headless-service.yaml
apiVersion: v1
kind: Service
metadata:
name: kubeapp-headless
spec:
type: ClusterIP
clusterIP: None
selector:
app: kubeapp
ports:
- port: 80
protocol: TCP
targetPort: 8000
EOF
kubectl apply -f kubeapp-headless-service.yaml
kubectl get svc | grep kubeapp
kubectl describe svc kubeapp
kubectl describe svc kubeapp-headless
kubectl run test -it --rm --image=kubenesia/kubebox -- sh
nslookup kubeapp
nslookup kubeapp-headless
curl kubeapp
curl kubeapp-headless:8000
exit
- Run the following commands:
cat <<EOF >nginx-service.yaml
apiVersion: v1
kind: Service
metadata:
name: nginx
spec:
selector:
application: nginx
role: web
ports:
- port: 80
protocol: TCP
targetPort: 8080
EOF
kubectl apply -f nginx-service.yaml
kubectl create deployment nginx --image=public.ecr.aws/docker/library/nginx:1.27.2
kubectl port-forward svc/nginx --address 0.0.0.0 1234:8080
curl localhost:1234
- Does it work? If not, what's wrong?
- Create a Deployment `echo` with image `mendhak/http-https-echo:31`. Create a Service with type `NodePort` to expose the previous Deployment with the same name. The application port is `8080`. Also create an HPA with a target CPU utilization of 60% and a max of 5 replicas.
- Route HTTP/HTTPS traffic into cluster workloads.
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.12.0-beta.0/deploy/static/provider/baremetal/deploy.yaml
kubectl get pods -n ingress-nginx
kubectl get svc -n ingress-nginx
kubectl create deployment blue --image=mendhak/http-https-echo:31
kubectl create deployment green --image=mendhak/http-https-echo:31
kubectl expose deployment blue --port=80 --target-port=8080
kubectl expose deployment green --port=80 --target-port=8080
kubectl create ingress blue --class=nginx --rule="blue.example.com/*=blue:80"
kubectl create ingress green --class=nginx --rule="green.example.com/*=green:80"
kubectl get ingress
kubectl get ingress blue -o yaml
kubectl get ingress green -o yaml
export NODE_PORT=$(kubectl get svc -n ingress-nginx ingress-nginx-controller -o yaml | yq '.spec.ports[0].nodePort')
echo $NODE_PORT
curl --connect-to ::127.0.0.1:$NODE_PORT http://blue.example.com
curl --connect-to ::127.0.0.1:$NODE_PORT http://green.example.com
cat <<EOF >blue-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: blue
spec:
ingressClassName: nginx
rules:
- http:
paths:
- backend:
service:
name: blue
port:
number: 80
path: /blue
pathType: Prefix
EOF
kubectl apply -f blue-ingress.yaml
cat <<EOF >green-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: green
spec:
ingressClassName: nginx
rules:
- http:
paths:
- backend:
service:
name: green
port:
number: 80
path: /green
pathType: Prefix
EOF
kubectl apply -f green-ingress.yaml
curl --connect-to ::127.0.0.1:$NODE_PORT http://example.com/blue
curl --connect-to ::127.0.0.1:$NODE_PORT http://example.com/green
- Gateway API is an official Kubernetes project focused on L4 and L7 routing in Kubernetes.
- Intended to be the next generation of Kubernetes Ingress, Load Balancing, and Service Mesh APIs.
- Designed to be generic, expressive, and role-oriented.
NGINX Gateway Fabric will be used here as the Gateway API implementation. For other implementations, see https://gateway-api.sigs.k8s.io/implementations/.
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.3.0/standard-install.yaml
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
cat <<EOF >/tmp/nginx-gateway.yaml
nginx:
kind: daemonSet
service:
type: NodePort
nodePorts:
- port: 30080
listenerPort: 80
EOF
helm install nginx-gateway oci://ghcr.io/nginx/charts/nginx-gateway-fabric --version 2.0.2 --create-namespace -n nginx-gateway -f /tmp/nginx-gateway.yaml
kubectl apply -f - <<EOF
kind: Gateway
apiVersion: gateway.networking.k8s.io/v1
metadata:
name: nginx-gateway
namespace: nginx-gateway
spec:
gatewayClassName: nginx
listeners:
- protocol: HTTP
port: 80
name: http
allowedRoutes:
namespaces:
from: All
EOF
kubectl create deployment blue --image=mendhak/http-https-echo:31
kubectl create deployment green --image=mendhak/http-https-echo:31
kubectl expose deployment blue --port=80 --target-port=8080
kubectl expose deployment green --port=80 --target-port=8080
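Before creating routes, confirm the Gateway was accepted by the controller (the PROGRAMMED column should read True):

```bash
kubectl get gateway -n nginx-gateway
```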
kubectl apply -f - <<EOF
kind: HTTPRoute
apiVersion: gateway.networking.k8s.io/v1
metadata:
name: blue
labels:
app: blue
spec:
hostnames:
- blue.example.com
parentRefs:
- name: nginx-gateway
namespace: nginx-gateway
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- kind: Service
name: blue
port: 80
EOF
kubectl apply -f - <<EOF
kind: HTTPRoute
apiVersion: gateway.networking.k8s.io/v1
metadata:
name: green
labels:
app: green
spec:
hostnames:
- green.example.com
parentRefs:
- name: nginx-gateway
namespace: nginx-gateway
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- kind: Service
name: green
port: 80
EOF
curl --connect-to ::127.0.0.1:30080 blue.example.com
curl --connect-to ::127.0.0.1:30080 green.example.com
kubectl apply -f - <<EOF
kind: HTTPRoute
apiVersion: gateway.networking.k8s.io/v1
metadata:
name: blue
labels:
app: blue
spec:
parentRefs:
- name: nginx-gateway
namespace: nginx-gateway
rules:
- matches:
- path:
type: PathPrefix
value: /blue
backendRefs:
- kind: Service
name: blue
port: 80
EOF
kubectl apply -f - <<EOF
kind: HTTPRoute
apiVersion: gateway.networking.k8s.io/v1
metadata:
name: green
labels:
app: green
spec:
parentRefs:
- name: nginx-gateway
namespace: nginx-gateway
rules:
- matches:
- path:
type: PathPrefix
value: /green
backendRefs:
- kind: Service
name: green
port: 80
EOF
curl --connect-to ::127.0.0.1:30080 example.com/blue
curl --connect-to ::127.0.0.1:30080 example.com/green
kubectl apply -f - <<EOF
kind: HTTPRoute
apiVersion: gateway.networking.k8s.io/v1
metadata:
name: weight
spec:
parentRefs:
- name: nginx-gateway
namespace: nginx-gateway
rules:
- matches:
- path:
type: PathPrefix
value: /weight
backendRefs:
- kind: Service
name: blue
port: 80
weight: 50
- kind: Service
name: green
port: 80
weight: 50
EOF
curl --connect-to ::127.0.0.1:30080 example.com/weight
curl --connect-to ::127.0.0.1:30080 example.com/weight
curl --connect-to ::127.0.0.1:30080 example.com/weight
curl --connect-to ::127.0.0.1:30080 example.com/weight
- Create two deployments `blue` and `green` with image `mendhak/http-https-echo:31`.
- The deployments should be accessible on `loadbalance.com` with 80:20 balancing between blue and green. Use an HTTPRoute named `loadbalance` to achieve this.
- Run `curl -s --connect-to ::127.0.0.1:30080 loadbalance.com | grep hostname` multiple times to check the result.
Used to limit network connections between Pods.
# Create target workload
kubectl create deployment kubeapp --image=kubenesia/kubeapp:1.2.0 --port=8000
kubectl expose deployment kubeapp --port=80 --target-port=8000
# Create NetworkPolicy to prevent access from other namespace
cat <<EOF >netpol-deny-from-other-ns.yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
namespace: default
name: deny-from-other-namespaces
spec:
podSelector: {}
ingress:
- from:
- podSelector: {}
EOF
kubectl apply -f netpol-deny-from-other-ns.yaml
# Check NetworkPolicy
kubectl get networkpolicy
# Test access from other namespace
kubectl create ns dev
kubectl run test -it -n dev --rm --image=kubenesia/kubebox -- sh
wget -qO- --timeout=2 kubeapp.default # fail
exit
kubectl run test -it --rm --image=kubenesia/kubebox -- sh
wget -qO- --timeout=2 kubeapp # ok
exit
# Remove NetworkPolicy
kubectl delete netpol deny-from-other-namespaces
kubectl create ns marketplace
kubectl create deployment backend --namespace=marketplace --image=nginx:1.27.2
kubectl create deployment frontend --namespace=marketplace --image=nginx:1.27.2
kubectl create deployment experiment --namespace=marketplace --image=nginx:1.27.2
kubectl expose deployment backend --namespace=marketplace --port=80
kubectl expose deployment frontend --namespace=marketplace --port=80
kubectl expose deployment experiment --namespace=marketplace --port=80
cat <<EOF >marketplace-netpol.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-backend-from-frontend
namespace: marketplace
spec:
podSelector:
matchLabels:
app: backend
ingress:
- from:
- podSelector:
matchLabels:
app: frontend
EOF
kubectl apply -f marketplace-netpol.yaml
kubectl exec -n marketplace -ti deployment/frontend -- bash
curl -m 5 backend.marketplace # ok
exit
kubectl exec -n marketplace -ti deployment/experiment -- bash
curl -m 5 backend.marketplace # fail
exit
kubectl delete -f marketplace-netpol.yaml
cat <<EOF >allow-from-specific-namespace-netpol.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-backend-from-research
namespace: marketplace
spec:
podSelector:
matchLabels:
app: backend
ingress:
- from:
- namespaceSelector:
matchLabels:
department: research
EOF
kubectl apply -f allow-from-specific-namespace-netpol.yaml
kubectl create namespace foobar
kubectl label namespace foobar department=research
kubectl run test -it -n foobar --rm --image=kubenesia/kubebox -- sh
curl -m 5 backend.marketplace # ok
kubectl run test -it -n default --rm --image=kubenesia/kubebox -- sh
curl -m 5 backend.marketplace # fail
exit
- Create new namespaces `ride` and `food`.
- Inside namespace `ride`, create 3 deployments: `gateway`, `travel`, `shadow`.
- Inside namespace `food`, create 3 deployments: `order`, `tenant`, `promo`.
- Create a NetworkPolicy `allow-from-ride` on namespace `food` to allow access from namespace `ride`.
- Create a NetworkPolicy `allow-gateway-from-travel` on namespace `ride` to only allow pods with label `app=travel` to access pods with label `app=gateway` inside the same namespace.
- All deployments should use image `nginx:1.27.2`.
- All deployments should have a Service with type `ClusterIP`.