From 5ecb089bea3cd561bc64ffdc0ec53a203afff9d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vedran=20Ivankovi=C4=87?= <33936733+Veki301@users.noreply.github.com> Date: Tue, 28 Oct 2025 15:40:59 +0100 Subject: [PATCH 1/2] update k8s renew certs --- .../scenario-1_k8s-v1.14-kubespray.md | 238 ------------------ .../scenario-1_k8s-v1.28-kubespray.md | 209 +++++++++++++++ 2 files changed, 209 insertions(+), 238 deletions(-) delete mode 100644 src/how-to/administrate/kubernetes/certificate-renewal/scenario-1_k8s-v1.14-kubespray.md create mode 100644 src/how-to/administrate/kubernetes/certificate-renewal/scenario-1_k8s-v1.28-kubespray.md diff --git a/src/how-to/administrate/kubernetes/certificate-renewal/scenario-1_k8s-v1.14-kubespray.md b/src/how-to/administrate/kubernetes/certificate-renewal/scenario-1_k8s-v1.14-kubespray.md deleted file mode 100644 index 938badd..0000000 --- a/src/how-to/administrate/kubernetes/certificate-renewal/scenario-1_k8s-v1.14-kubespray.md +++ /dev/null @@ -1,238 +0,0 @@ -# How to renew certificates on kubernetes 1.14.x - -Kubernetes-internal certificates by default (see assumptions) expire after one year. Without renewal, your installation will cease to function. -This page explains how to renew certificates. - -## Assumptions - -- Kubernetes version 1.14.x -- installed with the help of [Kubespray](https://github.com/kubernetes-sigs/kubespray) - - This page was tested using kubespray release 2.10 branch from 2019-05-20, i.e. commit `e2f5a9748e4dbfe2fdba7931198b0b5f1f4bdc7e`. 
-- setup: 3 scheduled nodes, each hosting master (control plane) + - worker (kubelet) + etcd (cluster state, key-value database) - -*NOTE: due to Kubernetes being installed with Kubespray, the Kubernetes -CAs (expire after 10yr) as well as certificates involved in etcd -communication (expire after 100yr) are not required to be renewed (any -time soon).* - -**Official documentation:** - -- [Certificate Management with kubeadm (v1.14)](https://v1-14.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/) -- [PKI certificates and requirements (v1.14)](https://v1-14.docs.kubernetes.io/docs/setup/best-practices/certificates/) - -## High-level description - -1. verify current expiration date -2. issue new certificates -3. generate new client configuration (aka kubeconfig file) -4. restart control plane -5. drain node - restart kubelet - uncordon node again -6. repeat 3-5 on all other nodes - -## Step-by-step instructions - -*Please note, that the following instructions may require privileged -execution. So, either switch to a privileged user or prepend following -statements with \`\`sudo\`\`. In any case, it is most likely that every -newly created file has to be owned by \`\`root\`\`, depending on kow -Kubernetes was installed.* - -1. 
Verify current expiration date on each node - -```bash -export K8S_CERT_DIR=/etc/kubernetes/pki -export ETCD_CERT_DIR=/etc/ssl/etcd/ssl -export KUBELET_CERT_DIR=/var/lib/kubelet/pki - - -for crt in ${K8S_CERT_DIR}/*.crt; do - expirationDate=$(openssl x509 -noout -text -in ${crt} | grep After | sed -e 's/^[[:space:]]*//') - echo "$(basename ${crt}) -- ${expirationDate}" -done - - -for crt in $(ls ${ETCD_CERT_DIR}/*.pem | grep -v 'key'); do - expirationDate=$(openssl x509 -noout -text -in ${crt} | grep After | sed -e 's/^[[:space:]]*//') - echo "$(basename ${crt}) -- ${expirationDate}" -done - -echo "kubelet-client-current.pem -- $(openssl x509 -noout -text -in ${KUBELET_CERT_DIR}/kubelet-client-current.pem | grep After | sed -e 's/^[[:space:]]*//')" -echo "kubelet.crt -- $(openssl x509 -noout -text -in ${KUBELET_CERT_DIR}/kubelet.crt | grep After | sed -e 's/^[[:space:]]*//')" - - -# MASTER: api-server cert -echo -n | openssl s_client -connect localhost:6443 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not -# MASTER: controller-manager cert -echo -n | openssl s_client -connect localhost:10257 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not -# MASTER: scheduler cert -echo -n | openssl s_client -connect localhost:10259 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not - -# WORKER: kubelet cert -echo -n | openssl s_client -connect localhost:10250 2>&1 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | openssl x509 -text -noout | grep Not -``` - -1. Allocate a terminal session on one node and backup existing - certificates & configurations - -```bash -cd /etc/kubernetes - -cp -r ./ssl ./ssl.bkp - -cp admin.conf admin.conf.bkp -cp controller-manager.conf controller-manager.conf.bkp -cp scheduler.conf scheduler.conf.bkp -cp kubelet.conf kubelet.conf.bkp -``` - -1. 
Renew certificates on that very node - -```bash -kubeadm alpha certs renew apiserver -kubeadm alpha certs renew apiserver-kubelet-client -kubeadm alpha certs renew front-proxy-client -``` - -*Looking at the timestamps of the certificates, it is indicated, that apicerver, kubelet & proxy-client have been -renewed. This can be confirmed, by executing parts of (1).* - -```default -root@kubenode01:/etc/kubernetes$ ls -al ./ssl -total 56 -drwxr-xr-x 2 kube root 4096 Mar 20 17:09 . -drwxr-xr-x 5 kube root 4096 Mar 20 17:08 .. --rw-r--r-- 1 root root 1517 Mar 20 15:12 apiserver.crt --rw------- 1 root root 1675 Mar 20 15:12 apiserver.key --rw-r--r-- 1 root root 1099 Mar 20 15:13 apiserver-kubelet-client.crt --rw------- 1 root root 1675 Mar 20 15:13 apiserver-kubelet-client.key --rw-r--r-- 1 root root 1025 Sep 23 14:53 ca.crt --rw------- 1 root root 1679 Sep 23 14:53 ca.key --rw-r--r-- 1 root root 1038 Sep 23 14:53 front-proxy-ca.crt --rw------- 1 root root 1679 Sep 23 14:53 front-proxy-ca.key --rw-r--r-- 1 root root 1058 Mar 20 15:13 front-proxy-client.crt --rw------- 1 root root 1675 Mar 20 15:13 front-proxy-client.key --rw------- 1 root root 1679 Sep 23 14:53 sa.key --rw------- 1 root root 451 Sep 23 14:53 sa.pub -``` - -1. Based on those renewed certificates, generate new kubeconfig files - -The first command assumes it’s being executed on a master node. You may need to swap `masters` with `nodes` in -case you are on a different sort of machines. 
- -```bash -kubeadm alpha kubeconfig user --org system:masters --client-name kubernetes-admin > /etc/kubernetes/admin.conf -kubeadm alpha kubeconfig user --client-name system:kube-controller-manager > /etc/kubernetes/controller-manager.conf -kubeadm alpha kubeconfig user --client-name system:kube-scheduler > /etc/kubernetes/scheduler.conf -``` - -*Again, check if ownership and permission for these files are the same -as all the others around them.* - -And, in case you are operating the cluster from the current node, you may want to replace the user’s kubeconfig. -Afterwards, compare the backup version with the new one, to see if any configuration (e.g. pre-configured *namespace*) -might need to be moved over, too. - -```bash -mv ~/.kube/config ~/.kube/config.bkp -cp /etc/kubernetes/admin.conf ~/.kube/config -chown $(id -u):$(id -g) ~/.kube/config -chmod 770 ~/.kube/config -``` - -1. Now that certificates and configuration files are in place, the - control plane must be restarted. They typically run in containers, so - the easiest way to trigger a restart, is to kill the processes - running in there. Use (1) to verify, that the expiration dates indeed - have been changed. - -```bash -kill -s SIGHUP $(pidof kube-apiserver) -kill -s SIGHUP $(pidof kube-controller-manager) -kill -s SIGHUP $(pidof kube-scheduler) -``` - -1. Make *kubelet* aware of the new certificate - -1. Drain the node - -```default -kubectl drain --delete-local-data --ignore-daemonsets $(hostname) -``` - -1. Stop the kubelet process - -```default -systemctl stop kubelet -``` - -1. Remove old certificates and configuration - -```default -mv /var/lib/kubelet/pki{,old} -mkdir /var/lib/kubelet/pki -``` - -1. Generate new kubeconfig file for the kubelet - -```default -kubeadm alpha kubeconfig user --org system:nodes --client-name system:node:$(hostname) > /etc/kubernetes/kubelet.conf -``` - -1. Start kubelet again - -```default -systemctl start kubelet -``` - -1. 
[Optional] Verify kubelet has recognized certificate rotation - -```default -sleep 5 && systemctl status kubelet -``` - -1. Allow workload to be scheduled again on the node - -```default -kubectl uncordon $(hostname) -``` - -1. Copy certificates over to all the other nodes - -Option A - you can ssh from one kubernetes node to another - -```bash -# set the ip or hostname: -export NODE2=root@ip-or-hostname -export NODE3=... - -scp ./ssl/apiserver.* "${NODE2}:/etc/kubernetes/ssl/" -scp ./ssl/apiserver.* "${NODE3}:/etc/kubernetes/ssl/" - -scp ./ssl/apiserver-kubelet-client.* "${NODE2}:/etc/kubernetes/ssl/" -scp ./ssl/apiserver-kubelet-client.* "${NODE3}:/etc/kubernetes/ssl/" - -scp ./ssl/front-proxy-client.* "${NODE2}:/etc/kubernetes/ssl/" -scp ./ssl/front-proxy-client.* "${NODE3}:/etc/kubernetes/ssl/" -``` - -Option B - copy via local administrator’s machine - -```bash -# set the ip or hostname: -export NODE1=root@ip-or-hostname -export NODE2= -export NODE3= - -scp -3 "${NODE1}:/etc/kubernetes/ssl/apiserver.*" "${NODE2}:/etc/kubernetes/ssl/" -scp -3 "${NODE1}:/etc/kubernetes/ssl/apiserver.*" "${NODE3}:/etc/kubernetes/ssl/" - -scp -3 "${NODE1}:/etc/kubernetes/ssl/apiserver-kubelet-client.*" "${NODE2}:/etc/kubernetes/ssl/" -scp -3 "${NODE1}:/etc/kubernetes/ssl/apiserver-kubelet-client.*" "${NODE3}:/etc/kubernetes/ssl/" - -scp -3 "${NODE1}:/etc/kubernetes/ssl/front-proxy-client.*" "${NODE2}:/etc/kubernetes/ssl/" -scp -3 "${NODE1}:/etc/kubernetes/ssl/front-proxy-client.*" "${NODE3}:/etc/kubernetes/ssl/" -``` - -1. 
Continue again with (4) for each node that is left diff --git a/src/how-to/administrate/kubernetes/certificate-renewal/scenario-1_k8s-v1.28-kubespray.md b/src/how-to/administrate/kubernetes/certificate-renewal/scenario-1_k8s-v1.28-kubespray.md new file mode 100644 index 0000000..6d5715b --- /dev/null +++ b/src/how-to/administrate/kubernetes/certificate-renewal/scenario-1_k8s-v1.28-kubespray.md @@ -0,0 +1,209 @@ +# How to renew certificates on kubernetes 1.28.x + +Kubernetes-internal certificates by default (see assumptions) expire after one year. Without renewal, your installation will cease to function. +This page explains how to renew certificates. + +## Assumptions + +- Kubernetes version 1.28.x +- installed with the help of [Kubespray](https://github.com/kubernetes-sigs/kubespray) + - This page was tested using kubespray release 2.15 branch from 2024-12-18, i.e. commit `781f02fddab7700817949c2adfd9dbda21cc68d8`. +- setup: 3 scheduled nodes, each hosting master (control plane) + + worker (kubelet) + etcd (cluster state, key-value database) + +*NOTE: due to Kubernetes being installed with Kubespray, the Kubernetes +CAs (expire after 10yr) as well as certificates involved in etcd +communication (expire after 100yr) are not required to be renewed (any +time soon).* + +**Official documentation:** + +- [Certificate Management with kubeadm](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/) +- [PKI certificates and requirements](https://kubernetes.io/docs/setup/best-practices/certificates/) + +## High-level description + +1. verify current expiration date +2. issue new certificates +3. generate new client configuration (aka kubeconfig file) +4. restart control plane +5. drain node - restart kubelet - uncordon node again +6. repeat 3-5 on all other nodes + +## Automated way + +WIP: + +## Step-by-step instructions + +*Please note, that the following instructions may require privileged +execution. 
So, either switch to a privileged user or prepend following
+statements with \`\`sudo\`\`. In any case, it is most likely that every
+newly created file has to be owned by \`\`root\`\`, depending on how
+Kubernetes was installed.*
+
+1. Verify current expiration date on each node
+
+```bash
+kubeadm certs check-expiration
+```
+
+2. Allocate a terminal session on one node and backup existing
+   certificates & configurations. You can skip creating backups if your certificates have already expired and your service is going down.
+
+```bash
+cd /etc/kubernetes
+
+cp -r ./ssl ./ssl.bkp
+
+cp admin.conf admin.conf.bkp
+cp controller-manager.conf controller-manager.conf.bkp
+cp scheduler.conf scheduler.conf.bkp
+cp kubelet.conf kubelet.conf.bkp
+```
+
+3. Renew certificates on that very node
+
+```bash
+kubeadm certs renew all
+```
+
+*Looking at the timestamps of the certificates, it is indicated, that apiserver, kubelet & proxy-client have been
+renewed. This can be confirmed by executing step 1 again.*
+
+4. Based on those renewed certificates, generate new kubeconfig files
+
+The first command assumes it’s being executed on a master node. You may need to swap `masters` with `nodes` in
+case you are running your cluster differently (for on-prem, we usually run a 3-node cluster with all `master` nodes).
+
+```bash
+kubeadm kubeconfig user --org system:masters --client-name kubernetes-admin > /etc/kubernetes/admin.conf
+kubeadm kubeconfig user --client-name system:kube-controller-manager > /etc/kubernetes/controller-manager.conf
+kubeadm kubeconfig user --client-name system:kube-scheduler > /etc/kubernetes/scheduler.conf
+```
+
+*Again, check if ownership and permission for these files are the same
+as all the others around them.*
+
+And, in case you are operating the cluster from the current node, you may want to replace the user’s kubeconfig.
+Afterwards, compare the backup version with the new one, to see if any configuration (e.g. 
pre-configured *namespace*)
+might need to be moved over, too.
+
+```bash
+mv ~/.kube/config ~/.kube/config.bkp
+cp /etc/kubernetes/admin.conf ~/.kube/config
+chown $(id -u):$(id -g) ~/.kube/config
+chmod 770 ~/.kube/config
+```
+
+5. Now that certificates and configuration files are in place, the
+   control plane must be restarted. They typically run in containers, so
+   the easiest way to trigger a restart, is to kill the processes
+   running in there. Use (1) to verify, that the expiration dates indeed
+   have been changed.
+
+First, find the `kube-apiserver`, `kube-controller-manager` and `kube-scheduler` containers
+```bash
+crictl ps | grep kube
+```
+
+Now stop the containers by their IDs (the ID is the *first* column in the list):
+```bash
+crictl stop ID
+```
+
+6. Make *kubelet* aware of the new certificate
+
+You can check the expiration of the kubelet certificate with the command below; sometimes it can be out of sync with the `kubeadm`-managed ones. We recommend keeping them in sync!
+
+```bash
+openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -dates
+```
+
+7. Drain the node (optional; skipping it may cause a small downtime — skip if your certs are already expired)
+
+```bash
+kubectl drain --delete-emptydir-data --ignore-daemonsets $(hostname)
+```
+
+8. Stop the kubelet process
+
+```bash
+systemctl stop kubelet
+```
+
+9. Remove old certificates and configuration
+
+```bash
+mv /var/lib/kubelet/pki{,old}
+mkdir /var/lib/kubelet/pki
+```
+
+10. Generate new kubeconfig file for the kubelet
+
+```bash
+kubeadm kubeconfig user --org system:nodes --client-name system:node:$(hostname) > /etc/kubernetes/kubelet.conf
+```
+
+11. Start kubelet again
+
+```bash
+systemctl start kubelet
+```
+
+12. [Optional] Verify kubelet has recognized certificate rotation
+
+```bash
+sleep 5 && systemctl status kubelet
+```
+
+13. Check kubelet certs
+
+```bash
+openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -dates
+```
+
+14. 
Allow workload to be scheduled again on the node (if you drained the node beforehand) + +```bash +kubectl uncordon $(hostname) +``` + +15. Copy certificates over to all the other nodes + +Option A - you can ssh from one kubernetes node to another + +```bash +# set the ip or hostname: +export NODE2=root@ip-or-hostname +export NODE3=... + +scp ./ssl/apiserver.* "${NODE2}:/etc/kubernetes/ssl/" +scp ./ssl/apiserver.* "${NODE3}:/etc/kubernetes/ssl/" + +scp ./ssl/apiserver-kubelet-client.* "${NODE2}:/etc/kubernetes/ssl/" +scp ./ssl/apiserver-kubelet-client.* "${NODE3}:/etc/kubernetes/ssl/" + +scp ./ssl/front-proxy-client.* "${NODE2}:/etc/kubernetes/ssl/" +scp ./ssl/front-proxy-client.* "${NODE3}:/etc/kubernetes/ssl/" +``` + +Option B - copy via local administrator’s machine + +```bash +# set the ip or hostname: +export NODE1=root@ip-or-hostname +export NODE2= +export NODE3= + +scp -3 "${NODE1}:/etc/kubernetes/ssl/apiserver.*" "${NODE2}:/etc/kubernetes/ssl/" +scp -3 "${NODE1}:/etc/kubernetes/ssl/apiserver.*" "${NODE3}:/etc/kubernetes/ssl/" + +scp -3 "${NODE1}:/etc/kubernetes/ssl/apiserver-kubelet-client.*" "${NODE2}:/etc/kubernetes/ssl/" +scp -3 "${NODE1}:/etc/kubernetes/ssl/apiserver-kubelet-client.*" "${NODE3}:/etc/kubernetes/ssl/" + +scp -3 "${NODE1}:/etc/kubernetes/ssl/front-proxy-client.*" "${NODE2}:/etc/kubernetes/ssl/" +scp -3 "${NODE1}:/etc/kubernetes/ssl/front-proxy-client.*" "${NODE3}:/etc/kubernetes/ssl/" +``` + +Now repeat the process from step (4) on each node that is left From 62df6f7ecf4f17b080a83cc68335118853bd0ae1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vedran=20Ivankovi=C4=87?= <33936733+Veki301@users.noreply.github.com> Date: Tue, 28 Oct 2025 16:32:38 +0100 Subject: [PATCH 2/2] fix broken link --- .../administrate/kubernetes/certificate-renewal/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/how-to/administrate/kubernetes/certificate-renewal/README.md b/src/how-to/administrate/kubernetes/certificate-renewal/README.md 
index 683529b..9b93105 100644
--- a/src/how-to/administrate/kubernetes/certificate-renewal/README.md
+++ b/src/how-to/administrate/kubernetes/certificate-renewal/README.md
@@ -1,3 +1,3 @@
 # Certificate renewal
 
-* [How to renew certificates on kubernetes 1.14.x](scenario-1_k8s-v1.14-kubespray.md)
+* [How to renew certificates on kubernetes 1.28.x](scenario-1_k8s-v1.28-kubespray.md)